hawq-commits mailing list archives

From r..@apache.org
Subject [13/35] incubator-hawq git commit: SGA import. Now with files previously missing because of the .gitignore issue
Date Tue, 22 Sep 2015 19:14:16 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/__init__.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/__init__.py b/tools/bin/ext/__init__.py
new file mode 100644
index 0000000..bd233a8
--- /dev/null
+++ b/tools/bin/ext/__init__.py
@@ -0,0 +1,290 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+try:
+    from cyaml import *
+except ImportError:
+    pass
+
+def scan(stream, Loader=Loader):
+    """
+    Scan a YAML stream and produce scanning tokens.
+    """
+    loader = Loader(stream)
+    while loader.check_token():
+        yield loader.get_token()
+
+def parse(stream, Loader=Loader):
+    """
+    Parse a YAML stream and produce parsing events.
+    """
+    loader = Loader(stream)
+    while loader.check_event():
+        yield loader.get_event()
+
+def compose(stream, Loader=Loader):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding representation tree.
+    """
+    loader = Loader(stream)
+    if loader.check_node():
+        return loader.get_node()
+
+def compose_all(stream, Loader=Loader):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding representation trees.
+    """
+    loader = Loader(stream)
+    while loader.check_node():
+        yield loader.get_node()
+
+def load_all(stream, Loader=Loader):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    """
+    loader = Loader(stream)
+    while loader.check_data():
+        yield loader.get_data()
+
+def load(stream, Loader=Loader):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    """
+    loader = Loader(stream)
+    if loader.check_data():
+        return loader.get_data()
+
+def safe_load_all(stream):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    Resolve only basic YAML tags.
+    """
+    return load_all(stream, SafeLoader)
+
+def safe_load(stream):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    Resolve only basic YAML tags.
+    """
+    return load(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None):
+    """
+    Emit YAML parsing events into a stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        try:
+            from cStringIO import StringIO
+        except ImportError:
+            from StringIO import StringIO
+        stream = StringIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break)
+    for event in events:
+        dumper.emit(event)
+    if getvalue:
+        return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None,
+        encoding='utf-8', explicit_start=None, explicit_end=None,
+        version=None, tags=None):
+    """
+    Serialize a sequence of representation trees into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        try:
+            from cStringIO import StringIO
+        except ImportError:
+            from StringIO import StringIO
+        stream = StringIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break,
+            encoding=encoding, version=version, tags=tags,
+            explicit_start=explicit_start, explicit_end=explicit_end)
+    dumper.open()
+    for node in nodes:
+        dumper.serialize(node)
+    dumper.close()
+    if getvalue:
+        return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+    """
+    Serialize a representation tree into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+        default_style=None, default_flow_style=None,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None,
+        encoding='utf-8', explicit_start=None, explicit_end=None,
+        version=None, tags=None):
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        try:
+            from cStringIO import StringIO
+        except ImportError:
+            from StringIO import StringIO
+        stream = StringIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, default_style=default_style,
+            default_flow_style=default_flow_style,
+            canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break,
+            encoding=encoding, version=version, tags=tags,
+            explicit_start=explicit_start, explicit_end=explicit_end)
+    dumper.open()
+    for data in documents:
+        dumper.represent(data)
+    dumper.close()
+    if getvalue:
+        return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+    """
+    Serialize a Python object into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+    """
+    Serialize a Python object into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+        Loader=Loader, Dumper=Dumper):
+    """
+    Add an implicit scalar detector.
+    If an implicit scalar value matches the given regexp,
+    the corresponding tag is assigned to the scalar.
+    first is a sequence of possible initial characters or None.
+    """
+    Loader.add_implicit_resolver(tag, regexp, first)
+    Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+    """
+    Add a path based resolver for the given tag.
+    A path is a list of keys that forms a path
+    to a node in the representation tree.
+    Keys can be string values, integers, or None.
+    """
+    Loader.add_path_resolver(tag, path, kind)
+    Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+    """
+    Add a constructor for the given tag.
+    Constructor is a function that accepts a Loader instance
+    and a node object and produces the corresponding Python object.
+    """
+    Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+    """
+    Add a multi-constructor for the given tag prefix.
+    Multi-constructor is called for a node if its tag starts with tag_prefix.
+    Multi-constructor accepts a Loader instance, a tag suffix,
+    and a node object and produces the corresponding Python object.
+    """
+    Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+    """
+    Add a representer for the given type.
+    Representer is a function accepting a Dumper instance
+    and an instance of the given data type
+    and producing the corresponding representation node.
+    """
+    Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+    """
+    Add a representer for the given type.
+    Multi-representer is a function accepting a Dumper instance
+    and an instance of the given data type or subtype
+    and producing the corresponding representation node.
+    """
+    Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+    """
+    The metaclass for YAMLObject.
+    """
+    def __init__(cls, name, bases, kwds):
+        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+    """
+    An object that can dump itself to a YAML stream
+    and load itself from a YAML stream.
+    """
+
+    __metaclass__ = YAMLObjectMetaclass
+    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
+
+    yaml_loader = Loader
+    yaml_dumper = Dumper
+
+    yaml_tag = None
+    yaml_flow_style = None
+
+    def from_yaml(cls, loader, node):
+        """
+        Convert a representation node to a Python object.
+        """
+        return loader.construct_yaml_object(node, cls)
+    from_yaml = classmethod(from_yaml)
+
+    def to_yaml(cls, dumper, data):
+        """
+        Convert a Python object to a representation node.
+        """
+        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+                flow_style=cls.yaml_flow_style)
+    to_yaml = classmethod(to_yaml)
+
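
For reference, a minimal usage sketch of the API defined above (hedged: the
import name follows this file's path, tools/bin/ext/__init__.py, and assumes
tools/bin is on sys.path):

    import ext as yaml_ext            # this bundled copy of the YAML API

    doc = yaml_ext.safe_load("a: 1\nb: [2, 3]\n")   # {'a': 1, 'b': [2, 3]}
    text = yaml_ext.safe_dump(doc)                   # back to a YAML string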

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/__init__.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/__init__.py b/tools/bin/ext/figleaf/__init__.py
new file mode 100644
index 0000000..9508655
--- /dev/null
+++ b/tools/bin/ext/figleaf/__init__.py
@@ -0,0 +1,309 @@
+"""
+figleaf is another tool to trace Python code coverage.
+
+figleaf uses the sys.settrace hook to record which statements are
+executed by the CPython interpreter; this record can then be saved
+into a file, or otherwise communicated back to a reporting script.
+
+figleaf differs from the gold standard of Python coverage tools
+('coverage.py') in several ways.  First and foremost, figleaf uses the
+same criterion for "interesting" lines of code as the sys.settrace
+function, which obviates some of the complexity in coverage.py (but
+does mean that your "loc" count goes down).  Second, figleaf does not
+record code executed in the Python standard library, which results in
+a significant speedup.  And third, the format in which the coverage
+information is saved is very simple and easy to work with.
+
+You might want to use figleaf if you're recording coverage from
+multiple types of tests and need to aggregate the coverage in
+interesting ways, and/or control when coverage is recorded.
+coverage.py is a better choice for command-line execution, and its
+reporting is a fair bit nicer.
+
+Command line usage: ::
+
+  figleaf <python file to execute> <args to python file>
+
+The figleaf output is saved into the file '.figleaf', which is an
+*aggregate* of coverage reports from all figleaf runs from this
+directory.  '.figleaf' contains a pickled dictionary of sets; the keys
+are source code filenames, and the sets contain all line numbers
+executed by the Python interpreter. See the docs or command-line
+programs in bin/ for more information.
+
+High level API: ::
+
+ * ``start(ignore_lib=True)`` -- start recording code coverage.
+ * ``stop()``                 -- stop recording code coverage.
+ * ``get_trace_obj()``        -- return the (singleton) trace object.
+ * ``get_info()``             -- get the coverage dictionary
+
+Classes & functions worth knowing about (lower level API):
+
+ * ``get_lines(fp)`` -- return the set of interesting lines in the fp.
+ * ``combine_coverage(d1, d2)`` -- combine coverage info from two dicts.
+ * ``read_coverage(filename)`` -- load the coverage dictionary
+ * ``write_coverage(filename)`` -- write the coverage out.
+ * ``annotate_coverage(...)`` -- annotate a Python file with its coverage info.
+
+Known problems:
+
+ -- module docstrings are *covered* but not found.
+
+AUTHOR: C. Titus Brown, titus@idyll.org, with contributions from Iain Lowe.
+
+'figleaf' is Copyright (C) 2006, 2007 C. Titus Brown.  It is under the
+BSD license.
+"""
+__version__ = "0.6.1"
+
+# __all__ == @CTB
+
+import sys
+import os
+from cPickle import dump, load
+from optparse import OptionParser
+
+import internals
+
+# use builtin sets if in >= 2.4, otherwise use 'sets' module.
+try:
+    set()
+except NameError:
+    from sets import Set as set
+
+def get_lines(fp):
+    """
+    Return the set of interesting lines in the source code read from
+    this file handle.
+    """
+    # rstrip is a workaround for http://bugs.python.org/issue4262
+    src = fp.read().rstrip() + "\n"
+    code = compile(src, "", "exec")
+    
+    return internals.get_interesting_lines(code)
+
+def combine_coverage(d1, d2):
+    """
+    Given two coverage dictionaries, combine the recorded coverage
+    and return a new dictionary.
+    """
+    keys = set(d1.keys())
+    keys.update(set(d2.keys()))
+
+    new_d = {}
+    for k in keys:
+        v = d1.get(k, set())
+        v2 = d2.get(k, set())
+
+        s = set(v)
+        s.update(v2)
+        new_d[k] = s
+
+    return new_d
+
+def write_coverage(filename, append=True):
+    """
+    Write the current coverage info out to the given filename.  If
+    'append' is false, destroy any previously recorded coverage info.
+    """
+    if _t is None:
+        return
+
+    data = internals.CoverageData(_t)
+
+    d = data.gather_files()
+
+    # sum existing coverage?
+    if append:
+        old = {}
+        fp = None
+        try:
+            fp = open(filename)
+        except IOError:
+            pass
+
+        if fp:
+            old = load(fp)
+            fp.close()
+            d = combine_coverage(d, old)
+
+    # ok, save.
+    outfp = open(filename, 'w')
+    try:
+        dump(d, outfp)
+    finally:
+        outfp.close()
+
+def read_coverage(filename):
+    """
+    Read a coverage dictionary in from the given file.
+    """
+    fp = open(filename)
+    try:
+        d = load(fp)
+    finally:
+        fp.close()
+
+    return d
+
+def dump_pickled_coverage(out_fp):
+    """
+    Dump coverage information in pickled format into the given file handle.
+    """
+    dump(_t, out_fp)
+
+def load_pickled_coverage(in_fp):
+    """
+    Replace (overwrite) coverage information from the given file handle.
+    """
+    global _t
+    _t = load(in_fp)
+
+def annotate_coverage(in_fp, out_fp, covered, all_lines,
+                      mark_possible_lines=False):
+    """
+    A simple example coverage annotator that outputs text.
+    """
+    for i, line in enumerate(in_fp):
+        i = i + 1
+
+        if i in covered:
+            symbol = '>'
+        elif i in all_lines:
+            symbol = '!'
+        else:
+            symbol = ' '
+
+        symbol2 = ''
+        if mark_possible_lines:
+            symbol2 = ' '
+            if i in all_lines:
+                symbol2 = '-'
+
+        out_fp.write('%s%s %s' % (symbol, symbol2, line,))
+
+def get_data():
+    if _t:
+        return internals.CoverageData(_t)
+
+#######################
+
+#
+# singleton functions/top-level API
+#
+
+_t = None
+
+def init(exclude_path=None, include_only=None):
+    from internals import CodeTracer
+    
+    global _t
+    if _t is None:
+        _t = CodeTracer(exclude_path, include_only)
+
+def start(ignore_python_lib=True):
+    """
+    Start tracing code coverage.  If 'ignore_python_lib' is True on
+    initial call, ignore all files that live below the same directory as
+    the 'os' module.
+    """
+    global _t
+    if not _t:
+        exclude_path = None
+        if ignore_python_lib:
+            exclude_path = os.path.realpath(os.path.dirname(os.__file__))
+
+        init(exclude_path, None)
+    
+    _t.start()
+
+def start_section(name):
+    global _t
+    _t.start_section(name)
+    
+def stop_section():
+    global _t
+    _t.stop_section()
+
+def stop():
+    """
+    Stop tracing code coverage.
+    """
+    global _t
+    if _t is not None:
+        _t.stop()
+
+def get_trace_obj():
+    """
+    Return the (singleton) trace object, if it exists.
+    """
+    return _t
+
+def get_info(section_name=None):
+    """
+    Get the coverage dictionary from the trace object.
+    """
+    if _t:
+        return get_data().gather_files(section_name)
+
+#############
+
+def display_ast():
+    l = internals.LineGrabber(open(sys.argv[1]))
+    l.pretty_print()
+    print l.lines
+
+def main():
+    """
+    Execute the given Python file with coverage, making it look like it is
+    __main__.
+    """
+    ignore_pylibs = False
+
+    # gather args
+
+    n = 1
+    figleaf_args = []
+    for n in range(1, len(sys.argv)):
+        arg = sys.argv[n]
+        if arg.startswith('-'):
+            figleaf_args.append(arg)
+        else:
+            break
+
+    remaining_args = sys.argv[n:]
+
+    usage = "usage: %prog [options] [python_script arg1 arg2 ...]"
+    option_parser = OptionParser(usage=usage)
+
+    option_parser.add_option('-i', '--ignore-pylibs', action="store_true",
+                             dest="ignore_pylibs", default=False,
+                             help="ignore Python library modules")
+
+    (options, args) = option_parser.parse_args(args=figleaf_args)
+    assert len(args) == 0
+
+    if not remaining_args:
+        option_parser.error("you must specify a python script to run!")
+
+    ignore_pylibs = options.ignore_pylibs
+
+    ## Reset system args so that the subsequently exec'd file can read
+    ## from sys.argv
+    
+    sys.argv = remaining_args
+
+    sys.path[0] = os.path.dirname(sys.argv[0])
+
+    cwd = os.getcwd()
+
+    start(ignore_pylibs)        # START code coverage
+
+    import __main__
+    try:
+        execfile(sys.argv[0], __main__.__dict__)
+    finally:
+        stop()                          # STOP code coverage
+
+        write_coverage(os.path.join(cwd, '.figleaf'))
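
A hedged sketch of the high-level API from the module docstring above
(run_my_code is a hypothetical stand-in for the code under measurement):

    import figleaf

    figleaf.start()                      # begin tracing via sys.settrace
    run_my_code()                        # hypothetical workload
    figleaf.stop()
    figleaf.write_coverage('.figleaf')   # merge into the aggregate pickle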

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/_lib.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/_lib.py b/tools/bin/ext/figleaf/_lib.py
new file mode 100644
index 0000000..97e5e83
--- /dev/null
+++ b/tools/bin/ext/figleaf/_lib.py
@@ -0,0 +1,6 @@
+import os.path, sys
+libdir = os.path.join(os.path.dirname(__file__), '../')
+libdir = os.path.normpath(libdir)
+
+if libdir not in sys.path:
+    sys.path.insert(0, libdir)

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/annotate.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/annotate.py b/tools/bin/ext/figleaf/annotate.py
new file mode 100644
index 0000000..ed674d9
--- /dev/null
+++ b/tools/bin/ext/figleaf/annotate.py
@@ -0,0 +1,225 @@
+"""
+Common functions for annotating files with figleaf coverage information.
+"""
+import sys, os
+from optparse import OptionParser
+import ConfigParser
+import re
+import logging
+
+import figleaf
+
+thisdir = os.path.dirname(__file__)
+
+try:                                    # 2.3 compatibility
+    logging.basicConfig(format='%(message)s', level=logging.WARNING)
+except TypeError:
+    pass
+
+logger = logging.getLogger('figleaf.annotate')
+
+DEFAULT_CONFIGURE_FILE = ".figleafrc"
+
+### utilities
+
+def safe_conf_get(conf, section, name, default):
+    try:
+        val = conf.get(section, name)
+    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+        val = default
+
+    return val
+
+def configure(parser):
+    """
+    Configure the optparse.OptionParser object with defaults, optionally
+    loaded from a configuration file.
+    """
+    CONFIG_FILE = os.environ.get('FIGLEAFRC', DEFAULT_CONFIGURE_FILE)
+    
+    parser.add_option("-c", "--coverage-file", action="store",
+                       type="string", dest="coverage_file",
+                       help="File containing figleaf coverage information.")
+    
+    parser.add_option("-s", "--sections-file", action="store",
+                       type="string", dest="sections_file",
+                       help="File containing figleaf sections coverage info.")
+
+    parser.add_option("-v", "--verbose", action="store_true",
+                      dest="verbose")
+
+    conf_file = ConfigParser.ConfigParser()
+    conf_file.read(CONFIG_FILE)         # ignores if not present
+
+    default_coverage_file = safe_conf_get(conf_file,
+                                          'figleaf', 'coverage_file',
+                                          '.figleaf')
+    default_sections_file = safe_conf_get(conf_file,
+                                          'figleaf', 'sections_file',
+                                          '.figleaf_sections')
+    default_verbose = int(safe_conf_get(conf_file, 'figleaf', 'verbose',
+                                        0))
+
+    parser.set_defaults(coverage_file=default_coverage_file,
+                        sections_file=default_sections_file,
+                        verbose=default_verbose)
+
+def filter_coverage(coverage, re_match):
+    """
+    Filter a coverage dictionary, keeping only files whose names
+    match the regexp 're_match'.
+    """
+    if not re_match:
+        return coverage
+
+    regexp = re.compile(re_match)
+    
+    d = {}
+    for filename, lines in coverage.items():
+        if regexp.match(filename):
+            d[filename] = lines
+            
+    return d
+
+### commands
+
+def list(options, match=""):
+    """
+    List the filenames in the coverage file, optionally limiting it to
+    those files matching to the regexp 'match'.
+    """
+    if options.verbose:
+        print>>sys.stderr, '** Reading coverage from coverage file %s' % \
+                           (options.coverage_file,)
+        if match:
+            print>>sys.stderr, '** Filtering against regexp "%s"' % (match,)
+        
+    coverage = figleaf.read_coverage(options.coverage_file)
+    coverage = filter_coverage(coverage, match)
+
+    for filename in coverage.keys():
+        print filename
+
+def list_sections(options, match=""):
+    """
+    List the filenames in the coverage file, optionally limiting it to
+    those files matching to the regexp 'match'.
+    """
+    if options.verbose:
+        print>>sys.stderr, '** Reading sections info from sections file %s' % \
+                           (options.sections_file,)
+        if match:
+            print>>sys.stderr, '** Filtering against regexp "%s"' % (match,)
+
+    fp = open(options.sections_file)
+    figleaf.load_pickled_coverage(fp) # @CTB
+
+    data = figleaf.internals.CoverageData(figleaf._t)
+    coverage = data.gather_files()
+    coverage = filter_coverage(coverage, match)
+
+    for filename in coverage.keys():
+        print filename
+
+###
+
+def read_exclude_patterns(filename):
+    """
+    Read in exclusion patterns from a file; these are just regexps.
+    """
+    if not filename:
+        return []
+
+    exclude_patterns = []
+
+    fp = open(filename)
+    for line in fp:
+        line = line.rstrip()
+        if line and not line.startswith('#'):
+            pattern = re.compile(line)
+            exclude_patterns.append(pattern)
+
+    return exclude_patterns
+
+def read_files_list(filename):
+    """
+    Read in a list of files from a file; these are relative or absolute paths.
+    """
+    s = {}
+    for line in open(filename):
+        f = line.strip()
+        s[os.path.abspath(f)] = 1
+
+    return s
+
+def filter_files(filenames, exclude_patterns = [], files_list = {}):
+    files_list = dict(files_list)       # make copy
+
+    # list of files specified?
+    if files_list:
+        for filename in files_list.keys():
+            yield filename
+
+        filenames = [ os.path.abspath(x) for x in filenames ]
+        for filename in filenames:
+            try:
+                del files_list[filename]
+            except KeyError:
+                logger.info('SKIPPING %s -- not in files list' % (filename,))
+            
+        return
+
+    ### no files list given -- handle differently
+
+    for filename in filenames:
+        abspath = os.path.abspath(filename)
+        
+        # check to see if we match anything in the exclude_patterns list
+        skip = False
+        for pattern in exclude_patterns:
+            if pattern.search(filename):
+                logger.info('SKIPPING %s -- matches exclusion pattern' % \
+                            (filename,))
+                skip = True
+                break
+
+        if skip:
+            continue
+
+        # next, check to see if we're part of the figleaf package.
+        if thisdir in filename:
+            logger.debug('SKIPPING %s -- part of the figleaf package' % \
+                         (filename,))
+            continue
+
+        # also, check for <string> (source file from things like 'exec'):
+        if filename == '<string>':
+            continue
+
+        # miscellaneous other things: doctests
+        if filename.startswith('<doctest '):
+            continue
+
+        yield filename
+
+###
+
+def main():
+    parser = OptionParser()
+    configure(parser)
+    
+    options, args = parser.parse_args()
+
+    if not len(args):
+        print "ERROR: You must specify a command like 'list' or 'report'.  Use"
+        print "\n    %s -h\n" % (sys.argv[0],)
+        print "for help on commands and options."
+        sys.exit(-1)
+        
+    cmd = args.pop(0)
+
+    if cmd == 'list':
+        list(options, *args)
+    elif cmd == 'list_sections':
+        list_sections(options, *args)
+
+    sys.exit(0)
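
configure() above reads its defaults from '.figleafrc' (or the file named by
the FIGLEAFRC environment variable).  A plausible example, using only the
keys the code actually looks up:

    [figleaf]
    coverage_file = .figleaf
    sections_file = .figleaf_sections
    verbose = 1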

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/annotate_cover.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/annotate_cover.py b/tools/bin/ext/figleaf/annotate_cover.py
new file mode 100644
index 0000000..bf48bda
--- /dev/null
+++ b/tools/bin/ext/figleaf/annotate_cover.py
@@ -0,0 +1,143 @@
+import figleaf
+import os
+import re
+
+from annotate import read_exclude_patterns, filter_files, logger
+
+def report_as_cover(coverage, exclude_patterns=[]):
+    ### now, output.
+
+    keys = coverage.keys()
+    info_dict = {}
+    
+    for k in filter_files(keys, exclude_patterns):
+        try:
+            pyfile = open(k, 'rU')
+            lines = figleaf.get_lines(pyfile)
+        except IOError:
+            logger.warning('CANNOT OPEN: %s' % k)
+            continue
+        except KeyboardInterrupt:
+            raise
+        except Exception, e:
+            logger.error('ERROR: file %s, exception %s' % (k, str(e)))
+            continue
+
+        # ok, got all the info.  now annotate file ==> html.
+
+        covered = coverage[k]
+        pyfile = open(k)
+        (n_covered, n_lines, output) = make_cover_lines(lines, covered, pyfile)
+
+
+        try:
+            pcnt = n_covered * 100. / n_lines
+        except ZeroDivisionError:
+            pcnt = 100
+        info_dict[k] = (n_lines, n_covered, pcnt)
+
+        outfile = make_cover_filename(k)
+        try:
+            outfp = open(outfile, 'w')
+            outfp.write("\n".join(output))
+            outfp.write("\n")
+            outfp.close()
+        except IOError:
+            logger.warning('cannot open filename %s' % (outfile,))
+            continue
+
+        logger.info('reported on %s' % (outfile,))
+
+    ### print a summary, too.
+
+    info_dict_items = info_dict.items()
+
+    def sort_by_pcnt(a, b):
+        a = a[1][2]
+        b = b[1][2]
+
+        return -cmp(a,b)
+
+    info_dict_items.sort(sort_by_pcnt)
+
+    logger.info('reported on %d file(s) total\n' % len(info_dict))
+    return len(info_dict)
+
+def make_cover_lines(line_info, coverage_info, fp):
+    n_covered = n_lines = 0
+    output = []
+    
+    for i, line in enumerate(fp):
+        is_covered = False
+        is_line = False
+
+        i += 1
+
+        if i in coverage_info:
+            is_covered = True
+            prefix = '+'
+
+            n_covered += 1
+            n_lines += 1
+        elif i in line_info:
+            prefix = '-'
+            is_line = True
+
+            n_lines += 1
+        else:
+            prefix = '0'
+
+        line = line.rstrip()
+        output.append(prefix + ' ' + line)
+    
+    return (n_covered, n_lines, output)
+
+def make_cover_filename(orig):
+    return orig + '.cover'
+
+def main():
+    import sys
+    import logging
+    from optparse import OptionParser
+    
+    ###
+
+    option_parser = OptionParser()
+
+    option_parser.add_option('-x', '--exclude-patterns', action="store",
+                             dest="exclude_patterns_file",
+                             help="file containing regexp patterns to exclude")
+
+    option_parser.add_option('-q', '--quiet', action='store_true',
+                             dest='quiet',
+                             help='Suppress all but error messages')
+    
+    option_parser.add_option('-D', '--debug', action='store_true',
+                             dest='debug',
+                             help='Show all debugging messages')
+
+    (options, args) = option_parser.parse_args()
+
+    if options.quiet:
+        logging.disable(logging.DEBUG)
+
+    if options.debug:
+        logger.setLevel(logging.DEBUG)
+
+    ### load
+
+    if not args:
+        args = ['.figleaf']
+
+    coverage = {}
+    for filename in args:
+        logger.debug("loading coverage info from '%s'\n" % (filename,))
+        d = figleaf.read_coverage(filename)
+        coverage = figleaf.combine_coverage(coverage, d)
+
+    if not coverage:
+        logger.warning('EXITING -- no coverage info!\n')
+        sys.exit(-1)
+
+    exclude = read_exclude_patterns(options.exclude_patterns_file)
+    report_as_cover(coverage, exclude)
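
make_cover_lines() above prefixes each source line with '+' (executed),
'-' (executable but not run) or '0' (not interesting), so a generated
'.cover' file looks schematically like:

    + def f(x):
    +     if x:
    -         return 1
    0     # a comment -- not executable
    +     return 2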

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/annotate_html.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/annotate_html.py b/tools/bin/ext/figleaf/annotate_html.py
new file mode 100644
index 0000000..e9bf7f9
--- /dev/null
+++ b/tools/bin/ext/figleaf/annotate_html.py
@@ -0,0 +1,276 @@
+import figleaf
+import os
+import re
+
+# use builtin sets if in >= 2.4, otherwise use 'sets' module.
+try:
+    set()
+except NameError:
+    from sets import Set as set
+
+from figleaf.annotate import read_exclude_patterns, filter_files, logger, \
+     read_files_list
+
+###
+
+def annotate_file(fp, lines, covered):
+    # initialize
+    n_covered = n_lines = 0
+
+    output = []
+    for i, line in enumerate(fp):
+        is_covered = False
+        is_line = False
+
+        i += 1
+
+        if i in covered:
+            is_covered = True
+
+            n_covered += 1
+            n_lines += 1
+        elif i in lines:
+            is_line = True
+
+            n_lines += 1
+
+        color = 'black'
+        if is_covered:
+            color = 'green'
+        elif is_line:
+            color = 'red'
+
+        line = escape_html(line.rstrip())
+        output.append('<font color="%s">%4d. %s</font>' % (color, i, line))
+
+    try:
+        percent = n_covered * 100. / n_lines
+    except ZeroDivisionError:
+        percent = 100
+
+    return output, n_covered, n_lines, percent
+
+def write_html_summary(info_dict, directory):
+    info_dict_items = info_dict.items()
+
+    def sort_by_percent(a, b):
+        a = a[1][2]
+        b = b[1][2]
+
+        return -cmp(a,b)
+    info_dict_items.sort(sort_by_percent)
+
+    summary_lines = sum([ v[0] for (k, v) in info_dict_items])
+    summary_cover = sum([ v[1] for (k, v) in info_dict_items])
+
+    summary_percent = 100
+    if summary_lines:
+        summary_percent = float(summary_cover) * 100. / float(summary_lines)
+
+
+    percents = [ float(v[1]) * 100. / float(v[0])
+                 for (k, v) in info_dict_items if v[0] ]
+    
+    percent_90 = [ x for x in percents if x >= 90 ]
+    percent_75 = [ x for x in percents if x >= 75 ]
+    percent_50 = [ x for x in percents if x >= 50 ]
+
+    ### write out summary.
+
+    index_fp = open('%s/index.html' % (directory,), 'w')
+    index_fp.write('''
+<html>
+<title>figleaf code coverage report</title>
+<h2>Summary</h2>
+%d files total: %d files &gt; 90%%, %d files &gt; 75%%, %d files &gt; 50%%
+<p>
+<table border=1>
+<tr>
+ <th>Filename</th><th># lines</th><th># covered</th><th>%% covered</th>
+</tr>
+
+<tr>
+ <td><b>totals:</b></td>
+ <td><b>%d</b></td>
+ <td><b>%d</b></td>
+ <td><b>%.1f%%</b></td>
+</tr>
+
+<tr></tr>
+
+''' % (len(percents), len(percent_90), len(percent_75), len(percent_50),
+       summary_lines, summary_cover, summary_percent,))
+
+    for filename, (n_lines, n_covered, percent_covered,) in info_dict_items:
+        html_outfile = make_html_filename(filename)
+
+        index_fp.write('''
+<tr>
+ <td><a href="./%s">%s</a></td>
+ <td>%d</td>
+ <td>%d</td>
+ <td>%.1f</td>
+</tr>
+''' % (html_outfile, filename, n_lines, n_covered, percent_covered,))
+
+    index_fp.write('</table>\n')
+    index_fp.close()
+    
+
+def report_as_html(coverage, directory, exclude_patterns, files_list):
+    """
+    Write an HTML report on all of the files, plus a summary.
+    """
+
+    ### now, output.
+
+    keys = coverage.keys()
+    info_dict = {}
+    for pyfile in filter_files(keys, exclude_patterns, files_list):
+
+        try:
+            fp = open(pyfile, 'rU')
+            lines = figleaf.get_lines(fp)
+        except KeyboardInterrupt:
+            raise
+        except IOError:
+            logger.error('CANNOT OPEN: %s' % (pyfile,))
+            continue
+        except Exception, e:
+            logger.error('ERROR: file %s, exception %s' % (pyfile, str(e)))
+            continue
+
+        #
+        # ok, we want to annotate this file.  now annotate file ==> html.
+        #
+
+        # initialize
+        covered = coverage.get(pyfile, set())
+
+        # rewind
+        fp.seek(0)
+
+        # annotate
+        output, n_covered, n_lines, percent = annotate_file(fp, lines, covered)
+
+        # summarize
+        info_dict[pyfile] = (n_lines, n_covered, percent)
+
+        # write out the file
+        html_outfile = make_html_filename(pyfile)
+        html_outfile = os.path.join(directory, html_outfile)
+        html_outfp = open(html_outfile, 'w')
+        
+        html_outfp.write('source file: <b>%s</b><br>\n' % (pyfile,))
+        html_outfp.write('''
+
+file stats: <b>%d lines, %d executed: %.1f%% covered</b>
+<pre>
+%s
+</pre>
+
+''' % (n_lines, n_covered, percent, "\n".join(output)))
+            
+        html_outfp.close()
+
+        logger.info('reported on %s' % (pyfile,))
+
+    ### print a summary, too.
+    write_html_summary(info_dict, directory)
+
+    logger.info('reported on %d file(s) total\n' % len(info_dict))
+
+def prepare_reportdir(dirname):
+    "Create output directory."
+    try:
+        os.mkdir(dirname)
+    except OSError:                         # already exists
+        pass
+
+def make_html_filename(orig):
+    "'escape' original paths into a single filename"
+
+    orig = os.path.abspath(orig)
+#    orig = os.path.basename(orig)
+    orig = os.path.splitdrive(orig)[1]
+    orig = orig.replace('_', '__')
+    orig = orig.replace(os.path.sep, '_')
+    orig += '.html'
+    return orig
+
+def escape_html(s):
+    s = s.replace("&", "&amp;")
+    s = s.replace("<", "&lt;")
+    s = s.replace(">", "&gt;")
+    s = s.replace('"', "&quot;")
+    return s
+
+def main():
+    import sys
+    import logging
+    from optparse import OptionParser
+    ###
+
+    usage = "usage: %prog [options] [coverage files ... ]"
+    option_parser = OptionParser(usage=usage)
+
+    option_parser.add_option('-x', '--exclude-patterns', action="store",
+                             dest="exclude_patterns_file",
+        help="file containing regexp patterns of files to exclude from report")
+
+    option_parser.add_option('-f', '--files-list', action="store",
+                             dest="files_list",
+                             help="file containing filenames to report on")
+
+    option_parser.add_option('-d', '--output-directory', action='store',
+                             dest="output_dir",
+                             default = "html",
+                             help="directory for HTML output")
+
+    option_parser.add_option('-q', '--quiet', action='store_true',
+                             dest='quiet',
+                             help='Suppress all but error messages')
+    
+    option_parser.add_option('-D', '--debug', action='store_true',
+                             dest='debug',
+                             help='Show all debugging messages')
+
+    (options, args) = option_parser.parse_args()
+
+    if options.quiet:
+        logging.disable(logging.DEBUG)
+        
+    if options.debug:
+        logger.setLevel(logging.DEBUG)
+
+    ### load/combine
+
+    if not args:
+        args = ['.figleaf']
+
+    coverage = {}
+    for filename in args:
+        logger.debug("loading coverage info from '%s'\n" % (filename,))
+        try:
+            d = figleaf.read_coverage(filename)
+            coverage = figleaf.combine_coverage(coverage, d)
+        except IOError:
+            logger.error("cannot open filename '%s'\n" % (filename,))
+
+    if not coverage:
+        logger.warning('EXITING -- no coverage info!\n')
+        sys.exit(-1)
+
+    exclude = []
+    if options.exclude_patterns_file:
+        exclude = read_exclude_patterns(options.exclude_patterns_file)
+
+    files_list = {}
+    if options.files_list:
+        files_list = read_files_list(options.files_list)
+
+    ### make directory
+    prepare_reportdir(options.output_dir)
+    report_as_html(coverage, options.output_dir, exclude, files_list)
+
+    print 'figleaf: HTML output written to %s' % (options.output_dir,)
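
A typical invocation of this reporter via the 'figleaf2html' wrapper script
included in this import (option names match the OptionParser setup above):

    figleaf2html -d html .figleaf
    # writes one HTML page per file plus html/index.html with the summary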

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/annotate_sections.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/annotate_sections.py b/tools/bin/ext/figleaf/annotate_sections.py
new file mode 100644
index 0000000..167b391
--- /dev/null
+++ b/tools/bin/ext/figleaf/annotate_sections.py
@@ -0,0 +1,79 @@
+#! /usr/bin/env python
+import figleaf
+from figleaf import internals
+# use builtin sets if in >= 2.4, otherwise use 'sets' module.
+try:
+    set()
+except NameError:
+    from sets import Set as set
+import sys
+from cPickle import load
+import os
+from optparse import OptionParser
+
+def main():
+    #### OPTIONS
+
+    parser = OptionParser()
+
+    parser.add_option('-c', '--coverage', nargs=1, action="store",
+                      dest="coverage_file", 
+                      help = 'load coverage info from this file',
+                      default='.figleaf_sections')
+
+    ####
+
+    (options, args) = parser.parse_args(sys.argv[1:])
+    coverage_file = options.coverage_file
+    
+    figleaf.load_pickled_coverage(open(coverage_file))
+    data = internals.CoverageData(figleaf._t)
+    full_cov = data.gather_files()
+
+    for filename in args:
+        annotate_file_with_sections(filename, data, full_cov)
+
+def annotate_file_with_sections(short, data, full_cov):
+    full = os.path.abspath(short)
+
+    tags = {}
+    sections = data.gather_sections(full)
+    sections.update(data.gather_sections(short))
+
+    print data.sections
+
+    print '*** PROCESSING:', short, '\n\t==>', short + '.sections'
+    for tag, cov in sections.items():
+        if cov:
+            tags[tag] = cov
+
+    if not tags:
+        print '*** No coverage info for file', short
+
+    tag_names = tags.keys()
+    tag_names.sort()
+    tag_names.reverse()
+
+    tags["-- all coverage --"] = full_cov.get(full, set())
+    tag_names.insert(0, "-- all coverage --")
+
+    n_tags = len(tag_names)
+    
+    fp = open(short + '.sections', 'w')
+
+    for i, tag in enumerate(tag_names):
+        fp.write('%s%s\n' % ('| ' * i, tag))
+    fp.write('| ' * n_tags)
+    fp.write('\n\n')
+
+    source = open(full)
+    for n, line in enumerate(source):
+        marks = ""
+        for tag in tag_names:
+            cov = tags[tag]
+
+            symbol = '  '
+            if (n+1) in cov:
+                symbol = '+ '
+
+            marks += symbol
+
+        fp.write('%s  | %s' % (marks, line))
+    
+    fp.close()
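
The '.sections' file written above lists the tag names with increasing
'| ' indentation, then one two-character marker column per tag ('+ ' if the
line ran under that tag) in front of each source line; schematically:

    -- all coverage --
    | test_foo
    | |

    + +   | def f():
    +     |     return 1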

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/figleaf2html
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/figleaf2html b/tools/bin/ext/figleaf/figleaf2html
new file mode 100755
index 0000000..58636b0
--- /dev/null
+++ b/tools/bin/ext/figleaf/figleaf2html
@@ -0,0 +1,7 @@
+#! /usr/bin/env python
+"""
+Output an HTML-ized coverage report.
+"""
+import _lib
+import figleaf.annotate_html
+figleaf.annotate_html.main()

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/internals.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/internals.py b/tools/bin/ext/figleaf/internals.py
new file mode 100644
index 0000000..a3d2162
--- /dev/null
+++ b/tools/bin/ext/figleaf/internals.py
@@ -0,0 +1,241 @@
+"""
+Coverage tracking internals.
+"""
+
+import sys
+import threading
+
+err = sys.stderr
+
+import types, symbol
+
+# use builtin sets if in >= 2.4, otherwise use 'sets' module.
+try:
+    set()
+except NameError:
+    from sets import Set as set
+
+def get_interesting_lines(code):
+    """
+    Count 'interesting' lines of Python in a code object, where
+    'interesting' is defined as 'lines that could possibly be
+    executed'.
+
+    This is done by disassembling the code object and returning the
+    line numbers.
+    """
+
+    # clean up weird end-of-file issues
+
+    lines = set([ l for (o, l) in findlinestarts(code) ])
+    for const in code.co_consts:
+        if type(const) == types.CodeType:
+            lines.update(get_interesting_lines(const))
+
+    return lines
+
+def findlinestarts(code):
+    """Find the offsets in a byte code which are start of lines in the source.
+
+    Generate pairs (offset, lineno) as described in Python/compile.c.
+
+    CTB -- swiped from Python 2.5, module 'dis', so that earlier versions
+    of Python could use the function, too.
+    """
+    byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
+    line_increments = [ord(c) for c in code.co_lnotab[1::2]]
+
+    lastlineno = None
+    lineno = code.co_firstlineno
+    addr = 0
+    for byte_incr, line_incr in zip(byte_increments, line_increments):
+        if byte_incr:
+            if lineno != lastlineno:
+                yield (addr, lineno)
+                lastlineno = lineno
+            addr += byte_incr
+        lineno += line_incr
+    if lineno != lastlineno:
+        yield (addr, lineno)
+
+class CodeTracer:
+    """
+    Basic mechanisms for code coverage tracking, using sys.settrace.  
+    """
+    def __init__(self, exclude_prefix, include_only_prefix):
+        self.common = self.c = set()
+        self.section_name = None
+        self.sections = {}
+        
+        self.started = False
+
+        assert not (exclude_prefix and include_only_prefix), \
+               "mutually exclusive"
+        
+        self.excl = exclude_prefix
+        self.incl = include_only_prefix
+
+    def start(self):
+        """
+        Start recording.
+        """
+        if not self.started:
+            self.started = True
+
+            if self.excl and not self.incl:
+                global_trace_fn = self.g1
+            elif self.incl and not self.excl:
+                global_trace_fn = self.g2
+            else:
+                global_trace_fn = self.g0
+
+            sys.settrace(global_trace_fn)
+
+            if hasattr(threading, 'settrace'):
+                threading.settrace(global_trace_fn)
+
+    def stop(self):
+        if self.started:
+            sys.settrace(None)
+            
+            if hasattr(threading, 'settrace'):
+                threading.settrace(None)
+
+            self.started = False
+            self.stop_section()
+
+    def g0(self, f, e, a):
+        """
+        global trace function, no exclude/include info.
+
+        f == frame, e == event, a == arg.
+        """
+        if e == 'call':
+            return self.t
+
+    def g1(self, f, e, a):
+        """
+        global trace function like g0, but ignores files starting with
+        'self.excl'.
+        """
+        if e == 'call':
+            excl = self.excl
+            path = f.f_globals.get('__file__')
+            if path is None:
+                path = f.f_code.co_filename
+
+            if excl and path.startswith(excl):
+                return
+
+            return self.t
+
+    def g2(self, f, e, a):
+        """
+        global trace function like g0, but only records files starting with
+        'self.incl'.
+        """
+        if e == 'call':
+            incl = self.incl
+            if incl and f.f_code.co_filename.startswith(incl):
+                return self.t
+
+    def t(self, f, e, a):
+        """
+        local trace function.
+        """
+        if e == 'line':
+            self.c.add((f.f_code.co_filename, f.f_lineno))
+        return self.t
+
+    def clear(self):
+        """
+        wipe out coverage info
+        """
+
+        self.c = set()
+
+    def start_section(self, name):
+        self.stop_section()
+
+        self.section_name = name
+        self.c = self.sections.get(name, set())
+        
+    def stop_section(self):
+        if self.section_name:
+            self.sections[self.section_name] = self.c
+            self.section_name = None
+            self.c = self.common
+
+class CoverageData:
+    """
+    A class to manipulate and combine data from the CodeTracer object.
+
+    In general, do not pickle this object; it's simpler and more
+    straightforward to just pass the basic Python objects around
+    (e.g. CoverageData.common, a set, and CoverageData.sections, a
+    dictionary of sets).
+    """
+    def __init__(self, trace_obj=None):
+        self.common = set()
+        self.sections = {}
+        
+        if trace_obj:
+            self.update(trace_obj)
+            
+    def update(self, trace_obj):
+        # transfer common-block code coverage -- if no sections are set,
+        # this will be all of the code coverage info.
+        self.common.update(trace_obj.common)
+
+        # update our internal section dictionary with the (filename, line_no)
+        # pairs from the section coverage as well.
+        
+        for section_name, section_d in trace_obj.sections.items():
+            section_set = self.sections.get(section_name, set())
+            section_set.update(section_d)
+            self.sections[section_name] = section_set
+
+    def gather_files(self, name=None):
+        """
+        Return the dictionary of lines of executed code; the dict
+        keys are filenames and values are sets containing individual
+        (integer) line numbers.
+        
+        'name', if set, is the desired section name from which to gather
+        coverage info.
+        """
+        cov = set()
+        cov.update(self.common)
+
+        if name is None:
+            for section_name, coverage_set in self.sections.items():
+                cov.update(coverage_set)
+        else:
+            coverage_set = self.sections.get(name, set())
+            cov.update(coverage_set)
+            
+#        cov = list(cov)
+#        cov.sort()
+
+        files = {}
+        for (filename, line) in cov:    # @CTB could optimize
+            d = files.get(filename, set())
+            d.add(line)
+            files[filename] = d
+
+        return files
+
+    def gather_sections(self, file):
+        """
+        Return a dictionary of sets containing section coverage information for
+        a specific file.  Dict keys are sections, and the dict values are
+        sets containing (integer) line numbers.
+        """
+        sections = {}
+        for k, c in self.sections.items():
+            s = set()
+            for (filename, line) in c:
+                if filename == file:
+                    s.add(line)
+            sections[k] = s
+        return sections
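
A small sketch of the low-level helper above -- compile a snippet and ask
which of its lines could possibly execute:

    import internals

    code = compile("x = 1\n\nif x:\n    y = 2\n", "<example>", "exec")
    print internals.get_interesting_lines(code)    # e.g. set([1, 3, 4])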

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/figleaf/nose_sections.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/figleaf/nose_sections.py b/tools/bin/ext/figleaf/nose_sections.py
new file mode 100644
index 0000000..e6fd42d
--- /dev/null
+++ b/tools/bin/ext/figleaf/nose_sections.py
@@ -0,0 +1,117 @@
+"""
+figleafsections plugin for nose.
+
+Automatically records coverage info for Python tests and connects it with
+which test was being run at the time.  Can be used to produce a "barcode"
+of code execution.
+"""
+
+DEFAULT_COVERAGE_FILE='.figleaf_sections'
+import pkg_resources
+
+try:
+    pkg_resources.require('figleaf>=0.6.1')
+    import figleaf
+except ImportError:
+    figleaf = None
+
+import sys
+err = sys.stderr
+
+import nose.case
+from nose.plugins.base import Plugin
+
+import logging
+import os
+
+log = logging.getLogger(__name__)
+
+def calc_testname(test):
+    """
+    Build a reasonably human-readable testname from each test.
+    """
+    name = str(test)
+    if ' ' in name:
+        name = name.split(' ')[1]
+
+    return name
+
+class FigleafSections(Plugin):
+    def __init__(self):
+        self.name = 'figleafsections'
+        Plugin.__init__(self)
+        self.testname = None
+
+    def add_options(self, parser, env=os.environ):
+        env_opt = 'NOSE_WITH_%s' % self.name.upper()
+        env_opt = env_opt.replace('-', '_')
+        parser.add_option("--with-%s" % self.name,
+                          action="store_true",
+                          dest=self.enableOpt,
+                          default=env.get(env_opt),
+                          help="Enable plugin %s: %s [%s]" %
+                          (self.__class__.__name__, self.help(), env_opt))
+
+        parser.add_option("--figleaf-file",
+                          action="store",
+                          dest="figleaf_file",
+                          default=None,
+                          help="Store figleaf section coverage in this file")
+        
+    def configure(self, options, config):
+        """
+        Configure: enable plugin?  And if so, where should the coverage
+        info be placed?
+        """
+        self.conf = config
+
+        # enable?
+        if hasattr(options, self.enableOpt):
+            self.enabled = getattr(options, self.enableOpt)
+
+        ### save coverage file name, if given.
+        if options.figleaf_file:
+            self.figleaf_file = options.figleaf_file
+        else:
+            self.figleaf_file = DEFAULT_COVERAGE_FILE
+
+        if self.enabled and figleaf is None:
+            raise Exception("You must install figleaf 0.6.1 before you can use the figleafsections plugin! See http://darcs.idyll.org/~t/projects/figleaf/doc/")
+
+    def begin(self):
+        """
+        Initialize: start recording coverage info.
+        """
+        figleaf.start()
+
+    def finalize(self, result):
+        """
+        Finalize: stop recording coverage info, save & exit.
+        """
+        figleaf.stop()
+        
+        fp = open(self.figleaf_file, 'w')
+        figleaf.dump_pickled_coverage(fp)
+        fp.close()
+
+    def startTest(self, test):
+        """
+        Run at the beginning of each test, before per-test fixtures.
+
+        One weakness is that this is only run for specific kinds of
+        nose testcases.
+        """
+        if isinstance(test, nose.case.Test):
+
+            self.testname = calc_testname(test)
+            assert self.testname
+
+            figleaf.start_section(self.testname)
+
+    def stopTest(self, test):
+        """
+        Run at the end of each test, after per-test fixtures.
+        """
+        if self.testname:
+            figleaf.stop_section()
+            self.testname = None
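
Once this plugin is registered with nose (e.g. through a setuptools entry
point -- registration itself is not shown in this file), enabling it from
the command line looks like:

    nosetests --with-figleafsections --figleaf-file=.figleaf_sections tests/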

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/pg8000/__init__.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/pg8000/__init__.py b/tools/bin/ext/pg8000/__init__.py
new file mode 100644
index 0000000..57de8e8
--- /dev/null
+++ b/tools/bin/ext/pg8000/__init__.py
@@ -0,0 +1,37 @@
+# vim: sw=4:expandtab:foldmethod=marker
+#
+# Copyright (c) 2007-2009, Mathieu Fenniak
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__author__ = "Mathieu Fenniak"
+
+import dbapi as DBAPI
+pg8000_dbapi = DBAPI
+
+from interface import *
+from types import Bytea
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/pg8000/dbapi.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/pg8000/dbapi.py b/tools/bin/ext/pg8000/dbapi.py
new file mode 100644
index 0000000..3f90188
--- /dev/null
+++ b/tools/bin/ext/pg8000/dbapi.py
@@ -0,0 +1,621 @@
+# vim: sw=4:expandtab:foldmethod=marker
+#
+# Copyright (c) 2007-2009, Mathieu Fenniak
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__author__ = "Mathieu Fenniak"
+
+import datetime
+import time
+import interface
+import types
+import threading
+from errors import *
+
+from warnings import warn
+
+##
+# The DBAPI level supported.  Currently 2.0.  This property is part of the
+# DBAPI 2.0 specification.
+apilevel = "2.0"
+
+##
+# Integer constant stating the level of thread safety the DBAPI interface
+# supports.  This DBAPI interface supports sharing of the module, connections,
+# and cursors.  This property is part of the DBAPI 2.0 specification.
+threadsafety = 3
+
+##
+# String property stating the type of parameter marker formatting expected by
+# the interface.  This value defaults to "format".  This property is part of
+# the DBAPI 2.0 specification.
+# <p>
+# Unlike the DBAPI specification, this value is not constant.  It can be
+# changed to any standard paramstyle value (i.e. qmark, numeric, named, format,
+# or pyformat).
+paramstyle = 'format' # paramstyle can be changed to any DB-API paramstyle
+
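+# Example (editorial sketch, not part of the original source): with the
+# default "format" style, convert_paramstyle rewrites
+#     ("SELECT * FROM t WHERE a = %s AND b = %s", (1, 2))
+# into the native PostgreSQL placeholder form
+#     ("SELECT * FROM t WHERE a = $1 AND b = $2", (1, 2))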
+def convert_paramstyle(src_style, query, args):
+    # I don't see any way to avoid scanning the query string char by char,
+    # so we might as well take that careful approach and create a
+    # state-based scanner.  We'll use int variables for the state.
+    #  0 -- outside quoted string
+    #  1 -- inside single-quote string '...'
+    #  2 -- inside quoted identifier   "..."
+    #  3 -- inside escaped single-quote string, E'...'
+    
+    if args is None:
+        return  query, args
+    
+    state = 0
+    output_query = ""
+    output_args = []
+    if src_style == "numeric":
+        output_args = args
+    elif src_style in ("pyformat", "named"):
+        mapping_to_idx = {}
+    i = 0
+    while 1:
+        if i == len(query):
+            break
+        c = query[i]
+        # print "begin loop", repr(i), repr(c), repr(state)
+        if state == 0:
+            if c == "'":
+                i += 1
+                output_query += c
+                state = 1
+            elif c == '"':
+                i += 1
+                output_query += c
+                state = 2
+            elif c == 'E':
+                # check for escaped single-quote string
+                i += 1
+                if i < len(query) and i > 1 and query[i] == "'":
+                    i += 1
+                    output_query += "E'"
+                    state = 3
+                else:
+                    output_query += c
+            elif src_style == "qmark" and c == "?":
+                i += 1
+                param_idx = len(output_args)
+                if param_idx == len(args):
+                    raise QueryParameterIndexError("too many parameter fields, not enough parameters")
+                output_args.append(args[param_idx])
+                output_query += "$" + str(param_idx + 1)
+            elif src_style == "numeric" and c == ":":
+                i += 1
+                if i < len(query) and i > 1 and query[i].isdigit():
+                    output_query += "$" + query[i]
+                    i += 1
+                else:
+                    raise QueryParameterParseError("numeric parameter : does not have numeric arg")
+            elif src_style == "named" and c == ":":
+                name = ""
+                while 1:
+                    i += 1
+                    if i == len(query):
+                        break
+                    c = query[i]
+                    if c.isalnum() or c == '_':
+                        name += c
+                    else:
+                        break
+                if name == "":
+                    raise QueryParameterParseError("empty name of named parameter")
+                idx = mapping_to_idx.get(name)
+                if idx == None:
+                    idx = len(output_args)
+                    output_args.append(args[name])
+                    idx += 1
+                    mapping_to_idx[name] = idx
+                output_query += "$" + str(idx)
+            elif src_style == "format" and c == "%":
+                i += 1
+                if i < len(query) and i > 1:
+                    if query[i] == "s":
+                        param_idx = len(output_args)
+                        if param_idx == len(args):
+                            raise QueryParameterIndexError("too many parameter fields, not enough parameters")
+                        output_args.append(args[param_idx])
+                        output_query += "$" + str(param_idx + 1)
+                    elif query[i] == "%":
+                        output_query += "%"
+                    else:
+                        raise QueryParameterParseError("Only %s and %% are supported")
+                    i += 1
+                else:
+                    raise QueryParameterParseError("format parameter % does not have format code")
+            elif src_style == "pyformat" and c == "%":
+                i += 1
+                if i < len(query) and i > 1:
+                    if query[i] == "(":
+                        i += 1
+                        # begin mapping name
+                        end_idx = query.find(')', i)
+                        if end_idx == -1:
+                            raise QueryParameterParseError("began pyformat dict read, but couldn't find end of name")
+                        else:
+                            name = query[i:end_idx]
+                            i = end_idx + 1
+                            if i < len(query) and query[i] == "s":
+                                i += 1
+                                idx = mapping_to_idx.get(name)
+                                if idx == None:
+                                    idx = len(output_args)
+                                    output_args.append(args[name])
+                                    idx += 1
+                                    mapping_to_idx[name] = idx
+                                output_query += "$" + str(idx)
+                            else:
+                                raise QueryParameterParseError("format not specified or not supported (only %(...)s supported)")
+                    elif query[i] == "%":
+                        output_query += "%"
+                    elif query[i] == "s":
+                        # we have a %s in a pyformat query string.  Assume
+                        # support for format instead.
+                        i -= 1
+                        src_style = "format"
+                    else:
+                        raise QueryParameterParseError("Only %(name)s, %s and %% are supported")
+            else:
+                i += 1
+                output_query += c
+        elif state == 1:
+            output_query += c
+            i += 1
+            if c == "'":
+                # Could be a double ''
+                if i < len(query) and query[i] == "'":
+                    # is a double quote.
+                    output_query += query[i]
+                    i += 1
+                else:
+                    state = 0
+            elif src_style in ("pyformat","format") and c == "%":
+                # hm... we're only going to support an escaped percent sign
+                if i < len(query):
+                    if query[i] == "%":
+                        # good.  We already output the first percent sign.
+                        i += 1
+                    else:
+                        raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
+        elif state == 2:
+            output_query += c
+            i += 1
+            if c == '"':
+                state = 0
+            elif src_style in ("pyformat","format") and c == "%":
+                # hm... we're only going to support an escaped percent sign
+                if i < len(query):
+                    if query[i] == "%":
+                        # good.  We already output the first percent sign.
+                        i += 1
+                    else:
+                        raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
+        elif state == 3:
+            output_query += c
+            i += 1
+            if c == "\\":
+                # check for escaped single-quote
+                if i < len(query) and query[i] == "'":
+                    output_query += "'"
+                    i += 1
+            elif c == "'":
+                state = 0
+            elif src_style in ("pyformat","format") and c == "%":
+                # hm... we're only going to support an escaped percent sign
+                if i < len(query):
+                    if query[i] == "%":
+                        # good.  We already output the first percent sign.
+                        i += 1
+                    else:
+                        raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
+
+    return output_query, tuple(output_args)
+
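+# Editorial note on quoting in convert_paramstyle: for the "format" and
+# "pyformat" styles, a doubled percent sign is the only escape accepted, and
+# it collapses to a single literal "%" both inside and outside quoted strings:
+#     ("SELECT 'a%%b' WHERE c = %s", (1,))
+#         -> ("SELECT 'a%b' WHERE c = $1", (1,))
+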
+def require_open_cursor(fn):
+    def _fn(self, *args, **kwargs):
+        if self.cursor == None:
+            raise CursorClosedError()
+        return fn(self, *args, **kwargs)
+    return _fn
+
+##
+# The class of object returned by the {@link #ConnectionWrapper.cursor cursor method}.
+class CursorWrapper(object):
+    def __init__(self, conn, connection):
+        self.cursor = interface.Cursor(conn)
+        self.arraysize = 1
+        self._connection = connection
+        self._override_rowcount = None
+
+    ##
+    # This read-only attribute returns a reference to the connection object on
+    # which the cursor was created.
+    # <p>
+    # Stability: Part of a DBAPI 2.0 extension.  A warning "DB-API extension
+    # cursor.connection used" will be fired.
+    connection = property(lambda self: self._getConnection())
+
+    def _getConnection(self):
+        warn("DB-API extension cursor.connection used", stacklevel=3)
+        return self._connection
+
+    ##
+    # This read-only attribute specifies the number of rows that the last
+    # .execute*() produced (for DQL statements like 'select') or affected (for
+    # DML statements like 'update' or 'insert').
+    # <p>
+    # The attribute is -1 in case no .execute*() has been performed on the
+    # cursor, or when the rowcount of the last operation cannot be determined
+    # by the interface.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    rowcount = property(lambda self: self._getRowCount())
+
+    @require_open_cursor
+    def _getRowCount(self):
+        if self._override_rowcount != None:
+            return self._override_rowcount
+        return self.cursor.row_count
+
+    ##
+    # This read-only attribute is a sequence of 7-item sequences.  Each value
+    # contains information describing one result column.  The 7 items returned
+    # for each column are (name, type_code, display_size, internal_size,
+    # precision, scale, null_ok).  Only the first two values are provided by
+    # this interface implementation.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
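+    # <p>
+    # Example (editorial): a single text column might be described as
+    # [("name", 25, None, None, None, None, None)], 25 being the "text" type
+    # oid.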
+    description = property(lambda self: self._getDescription())
+
+    @require_open_cursor
+    def _getDescription(self):
+        if self.cursor.row_description == None:
+            return None
+        columns = []
+        for col in self.cursor.row_description:
+            columns.append((col["name"], col["type_oid"], None, None, None, None, None))
+        return columns
+
+    ##
+    # Executes a database operation.  Parameters may be provided as a sequence
+    # or mapping and will be bound to variables in the operation.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_cursor
+    def execute(self, operation, args=()):
+        self._override_rowcount = None
+        self._execute(operation, args)
+
+    def _execute(self, operation, args=()):
+        new_query, new_args = convert_paramstyle(paramstyle, operation, args)
+        try:
+            self.cursor.execute(new_query, *new_args)
+        except ConnectionClosedError:
+            # can't rollback in this case
+            raise
+        except:
+            # any error will rollback the transaction to-date
+            self._connection.rollback()
+            raise
+
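+    # Editorial sketch (not in the original source): the two COPY helpers
+    # below stream data between a file object and a table; e.g., dumping a
+    # hypothetical table "events" as tab-separated text with an open cursor:
+    #     out = open("/tmp/events.tsv", "w")
+    #     cur.copy_to(out, table="events")
+    #     out.close()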
+    def copy_from(self, fileobj, table=None, sep='\t', null=None, query=None):
+        if query == None:
+            if table == None:
+                raise CopyQueryOrTableRequiredError()
+            query = "COPY %s FROM stdout DELIMITER '%s'" % (table, sep)
+            if null is not None:
+                query += " NULL '%s'" % (null,)
+        self.copy_execute(fileobj, query)
+
+    def copy_to(self, fileobj, table=None, sep='\t', null=None, query=None):
+        if query == None:
+            if table == None:
+                raise CopyQueryOrTableRequiredError()
+            query = "COPY %s TO stdout DELIMITER '%s'" % (table, sep)
+            if null is not None:
+                query += " NULL '%s'" % (null,)
+        self.copy_execute(fileobj, query)
+    
+    @require_open_cursor
+    def copy_execute(self, fileobj, query):
+        try:
+            self.cursor.execute(query, stream=fileobj)
+        except ConnectionClosedError:
+            # can't rollback in this case
+            raise
+        except:
+            # any error will rollback the transaction to-date
+            import traceback; traceback.print_exc()
+            self._connection.rollback()
+            raise
+
+    ##
+    # Prepare a database operation and then execute it against all parameter
+    # sequences or mappings provided.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
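+    # <p>
+    # Example (editorial), assuming the default "format" paramstyle:
+    #     cur.executemany("INSERT INTO t (a, b) VALUES (%s, %s)",
+    #                     ((1, "one"), (2, "two")))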
+    @require_open_cursor
+    def executemany(self, operation, parameter_sets):
+        self._override_rowcount = 0
+        for parameters in parameter_sets:
+            self._execute(operation, parameters)
+            if self.cursor.row_count == -1 or self._override_rowcount == -1:
+                self._override_rowcount = -1
+            else:
+                self._override_rowcount += self.cursor.row_count
+
+    ##
+    # Fetch the next row of a query result set, returning a single sequence, or
+    # None when no more data is available.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_cursor
+    def fetchone(self):
+        return self.cursor.read_tuple()
+
+    ##
+    # Fetch the next set of rows of a query result, returning a sequence of
+    # sequences.  An empty sequence is returned when no more rows are
+    # available.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    # @param size   The number of rows to fetch when called.  If not provided,
+    #               the arraysize property value is used instead.
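+    # <p>
+    # Example (editorial): page through a large result 100 rows at a time:
+    #     cur.arraysize = 100
+    #     while 1:
+    #         batch = cur.fetchmany()
+    #         if not batch:
+    #             break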
+    def fetchmany(self, size=None):
+        if size == None:
+            size = self.arraysize
+        rows = []
+        for i in range(size):
+            value = self.fetchone()
+            if value == None:
+                break
+            rows.append(value)
+        return rows
+
+    ##
+    # Fetch all remaining rows of a query result, returning them as a sequence
+    # of sequences.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_cursor
+    def fetchall(self):
+        return tuple(self.cursor.iterate_tuple())
+
+    ##
+    # Close the cursor.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_cursor
+    def close(self):
+        self.cursor.close()
+        self.cursor = None
+        self._override_rowcount = None
+
+    def next(self):
+        warn("DB-API extension cursor.next() used", stacklevel=2)
+        retval = self.fetchone()
+        if retval == None:
+            raise StopIteration()
+        return retval
+
+    def __iter__(self):
+        warn("DB-API extension cursor.__iter__() used", stacklevel=2)
+        return self
+
+    def setinputsizes(self, sizes):
+        pass
+
+    def setoutputsize(self, size, column=None):
+        pass
+
+    @require_open_cursor
+    def fileno(self):
+        return self.cursor.fileno()
+    
+    @require_open_cursor
+    def isready(self):
+        return self.cursor.isready()
+
+def require_open_connection(fn):
+    def _fn(self, *args, **kwargs):
+        if self.conn == None:
+            raise ConnectionClosedError()
+        return fn(self, *args, **kwargs)
+    return _fn
+
+##
+# The class of object returned by the {@link #connect connect method}.
+class ConnectionWrapper(object):
+    # DBAPI Extension: supply exceptions as attributes on the connection
+    Warning = property(lambda self: self._getError(Warning))
+    Error = property(lambda self: self._getError(Error))
+    InterfaceError = property(lambda self: self._getError(InterfaceError))
+    DatabaseError = property(lambda self: self._getError(DatabaseError))
+    OperationalError = property(lambda self: self._getError(OperationalError))
+    IntegrityError = property(lambda self: self._getError(IntegrityError))
+    InternalError = property(lambda self: self._getError(InternalError))
+    ProgrammingError = property(lambda self: self._getError(ProgrammingError))
+    NotSupportedError = property(lambda self: self._getError(NotSupportedError))
+
+    def _getError(self, error):
+        warn("DB-API extension connection.%s used" % error.__name__, stacklevel=3)
+        return error
+
+    def __init__(self, **kwargs):
+        self.conn = interface.Connection(**kwargs)
+        self.notifies = []
+        self.notifies_lock = threading.Lock()
+        self.conn.NotificationReceived += self._notificationReceived
+        self.conn.begin()
+
+    def _notificationReceived(self, notice):
+        # psycopg2 compatible notification interface
+        try:
+            self.notifies_lock.acquire()
+            self.notifies.append((notice.backend_pid, notice.condition))
+        finally:
+            self.notifies_lock.release()
+
+    ##
+    # Creates a {@link #CursorWrapper CursorWrapper} object bound to this
+    # connection.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_connection
+    def cursor(self):
+        return CursorWrapper(self.conn, self)
+
+    ##
+    # Commits the current database transaction.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_connection
+    def commit(self):
+        # There's a threading bug here.  If a query is sent after the
+        # commit, but before the begin, it will be executed immediately
+        # without a surrounding transaction.  Like all threading bugs -- it
+        # sounds unlikely, until it happens every time in one
+        # application...  however, to fix this, we need to lock the
+        # database connection entirely, so that no cursors can execute
+        # statements on other threads.  Support for that type of lock will
+        # be done later.
+        self.conn.commit()
+        self.conn.begin()
+
+    ##
+    # Rolls back the current database transaction.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_connection
+    def rollback(self):
+        # see bug description in commit.
+        self.conn.rollback()
+        self.conn.begin()
+
+    ##
+    # Closes the database connection.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    @require_open_connection
+    def close(self):
+        self.conn.close()
+        self.conn = None
+
+    @require_open_connection
+    def recache_record_types(self):
+        self.conn.recache_record_types()
+
+
+##
+# Creates a DBAPI 2.0 compatible interface to a PostgreSQL database.
+# <p>
+# Stability: Part of the DBAPI 2.0 specification.
+#
+# @param user   The username to connect to the PostgreSQL server with.  This
+# parameter is required.
+#
+# @keyparam host   The hostname of the PostgreSQL server to connect with.
+# Providing this parameter is necessary for TCP/IP connections.  Either host
+# or unix_sock must be provided.
+#
+# @keyparam unix_sock   The path to the UNIX socket to access the database
+# through, for example, '/tmp/.s.PGSQL.5432'.  Either unix_sock or host must
+# be provided.  The port parameter will have no effect if unix_sock is
+# provided.
+#
+# @keyparam port   The TCP/IP port of the PostgreSQL server instance.  This
+# parameter defaults to 5432, the registered and common port of PostgreSQL
+# TCP/IP servers.
+#
+# @keyparam database   The name of the database instance to connect with.  This
+# parameter is optional; if omitted, the PostgreSQL server will assume the
+# database name is the same as the username.
+#
+# @keyparam password   The user password to connect to the server with.  This
+# parameter is optional.  If omitted and the database server requests
+# password-based authentication, the connection will fail.  On the other hand,
+# if this
+# parameter is provided and the database does not request password
+# authentication, then the password will not be used.
+#
+# @keyparam socket_timeout  Socket connect timeout measured in seconds.
+# Defaults to 60 seconds.
+#
+# @keyparam ssl     Use SSL encryption for TCP/IP socket.  Defaults to False.
+#
+# @return An instance of {@link #ConnectionWrapper ConnectionWrapper}.
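+#
+# Example (editorial; host and credentials are hypothetical):
+#     conn = connect(user="gpadmin", host="127.0.0.1", database="gptest",
+#                    password="changeme")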
+def connect(user, host=None, unix_sock=None, port=5432, database=None, password=None, socket_timeout=60, ssl=False, options=None):
+    return ConnectionWrapper(user=user, host=host,
+            unix_sock=unix_sock, port=port, database=database,
+            password=password, socket_timeout=socket_timeout, ssl=ssl, options=options)
+
+def Date(year, month, day):
+    return datetime.date(year, month, day)
+
+def Time(hour, minute, second):
+    return datetime.time(hour, minute, second)
+
+def Timestamp(year, month, day, hour, minute, second):
+    return datetime.datetime(year, month, day, hour, minute, second)
+
+def DateFromTicks(ticks):
+    return Date(*time.localtime(ticks)[:3])
+
+def TimeFromTicks(ticks):
+    return Time(*time.localtime(ticks)[3:6])
+
+def TimestampFromTicks(ticks):
+    return Timestamp(*time.localtime(ticks)[:6])
+
+##
+# Construct an object holding binary data.
+def Binary(value):
+    return types.Bytea(value)
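+
+# Example (editorial): Binary("\x00\x01") wraps raw bytes so they are sent as
+# a bytea parameter rather than as text.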
+
+# I have no idea what this would be used for by a client app.  Should it be
+# TEXT, VARCHAR, CHAR?  It will only compare against row_description's
+# type_code if it is this one type.  It is the varchar type oid for now; this
+# appears to match expectations in the DB-API 2.0 compliance test suite.
+STRING = 1043
+
+# bytea type_oid
+BINARY = 17
+
+# numeric type_oid
+NUMBER = 1700
+
+# timestamp type_oid
+DATETIME = 1114
+
+# oid type_oid
+ROWID = 26
+
+

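Editor's note: a short transactional sketch (not part of the commit) of the
ConnectionWrapper behaviour above -- a connection begins a transaction when it
is created, and commit()/rollback() immediately open the next one; all names
are hypothetical:

    from pg8000 import dbapi

    conn = dbapi.connect(user="gpadmin", unix_sock="/tmp/.s.PGSQL.5432")
    cur = conn.cursor()
    cur.execute("UPDATE accounts SET balance = balance - %s WHERE id = %s",
                (100, 1))
    conn.commit()   # commits, then issues the next implicit begin
    cur.close()
    conn.close()
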
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a485be47/tools/bin/ext/pg8000/errors.py
----------------------------------------------------------------------
diff --git a/tools/bin/ext/pg8000/errors.py b/tools/bin/ext/pg8000/errors.py
new file mode 100644
index 0000000..b8b5acf
--- /dev/null
+++ b/tools/bin/ext/pg8000/errors.py
@@ -0,0 +1,115 @@
+# vim: sw=4:expandtab:foldmethod=marker
+#
+# Copyright (c) 2007-2009, Mathieu Fenniak
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__author__ = "Mathieu Fenniak"
+
+class Warning(StandardError):
+    pass
+
+class Error(StandardError):
+    pass
+
+class InterfaceError(Error):
+    pass
+
+class ConnectionClosedError(InterfaceError):
+    def __init__(self):
+        InterfaceError.__init__(self, "connection is closed")
+
+class CursorClosedError(InterfaceError):
+    def __init__(self):
+        InterfaceError.__init__(self, "cursor is closed")
+
+class DatabaseError(Error):
+    pass
+
+class DataError(DatabaseError):
+    pass
+
+class OperationalError(DatabaseError):
+    pass
+
+class IntegrityError(DatabaseError):
+    pass
+
+class InternalError(DatabaseError):
+    pass
+
+class ProgrammingError(DatabaseError):
+    pass
+
+class NotSupportedError(DatabaseError):
+    pass
+
+##
+# An exception that is thrown when an internal error occurs trying to
+# decode binary array data from the server.
+class ArrayDataParseError(InternalError):
+    pass
+
+##
+# Thrown when attempting to transmit an array of unsupported data types.
+class ArrayContentNotSupportedError(NotSupportedError):
+    pass
+
+##
+# Thrown when attempting to send an array that doesn't contain all the same
+# type of objects (e.g. some floats, some ints).
+class ArrayContentNotHomogenousError(ProgrammingError):
+    pass
+
+##
+# Attempted to pass an empty array in, but it's not possible to determine the
+# data type for an empty array.
+class ArrayContentEmptyError(ProgrammingError):
+    pass
+
+##
+# Attempted to use a multidimensional array with inconsistent array sizes.
+class ArrayDimensionsNotConsistentError(ProgrammingError):
+    pass
+
+# A cursor's copy_to or copy_from call was not given a table or query
+# to operate on.
+class CopyQueryOrTableRequiredError(ProgrammingError):
+    pass
+
+# Raised if a COPY query is executed without using copy_to or copy_from
+# functions to provide a data stream.
+class CopyQueryWithoutStreamError(ProgrammingError):
+    pass
+
+# When query parameters don't match up with query args.
+class QueryParameterIndexError(ProgrammingError):
+    pass
+
+# Some sort of parse error occurred during query parameterization.
+class QueryParameterParseError(ProgrammingError):
+    pass
+
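+# Example (editorial): these classes form the standard DB-API hierarchy, so
+# both specific and broad handlers work:
+#     try:
+#         cur.execute("SELECT %s")      # placeholder with no parameter given
+#     except QueryParameterIndexError:  # a ProgrammingError subclass
+#         pass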

