subversion-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hwri...@apache.org
Subject svn commit: r1404846 [4/4] - in /subversion/branches/ev2-export: ./ subversion/include/ subversion/include/private/ subversion/libsvn_client/ subversion/libsvn_delta/ subversion/libsvn_fs_fs/ subversion/libsvn_ra/ subversion/libsvn_ra_serf/ subversion/...
Date Fri, 02 Nov 2012 00:57:12 GMT
Modified: subversion/branches/ev2-export/subversion/tests/cmdline/svntest/main.py
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/subversion/tests/cmdline/svntest/main.py?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/subversion/tests/cmdline/svntest/main.py (original)
+++ subversion/branches/ev2-export/subversion/tests/cmdline/svntest/main.py Fri Nov  2 00:57:09
2012
@@ -174,6 +174,12 @@ work_dir = "svn-test-work"
 # Constant for the merge info property.
 SVN_PROP_MERGEINFO = "svn:mergeinfo"
 
+# Constant for the inheritable auto-props property.
+SVN_PROP_INHERITABLE_AUTOPROPS = "svn:inheritable-auto-props"
+
+# Constant for the inheritable ignores property.
+SVN_PROP_INHERITABLE_IGNORES = "svn:inheritable-ignores"
+
 # Where we want all the repositories and working copies to live.
 # Each test will have its own!
 general_repo_dir = os.path.join(work_dir, "repositories")
@@ -952,12 +958,16 @@ def copy_repos(src_path, dst_path, head_
 
   dump_re = re.compile(r'^\* Dumped revision (\d+)\.\r?$')
   expect_revision = 0
+  dump_failed = False
   for dump_line in dump_stderr:
     match = dump_re.match(dump_line)
     if not match or match.group(1) != str(expect_revision):
       logger.warn('ERROR:  dump failed: %s', dump_line.strip())
-      raise SVNRepositoryCopyFailure
-    expect_revision += 1
+      dump_failed = True
+    else:
+      expect_revision += 1
+  if dump_failed:
+    raise SVNRepositoryCopyFailure
   if expect_revision != head_revision + 1:
     logger.warn('ERROR:  dump failed; did not see revision %s', head_revision)
     raise SVNRepositoryCopyFailure
@@ -1299,7 +1309,8 @@ class TestRunner:
 
   def list(self, milestones_dict=None):
     """Print test doc strings.  MILESTONES_DICT is an optional mapping
-    of issue numbers to target milestones."""
+    of issue numbers to a list containing target milestones and who
+    the issue is assigned to."""
     if options.mode_filter.upper() == 'ALL' \
        or options.mode_filter.upper() == self.pred.list_mode().upper() \
        or (options.mode_filter.upper() == 'PASS' \
@@ -1309,6 +1320,7 @@ class TestRunner:
       if self.pred.issues:
         if not options.milestone_filter or milestones_dict is None:
           issues = self.pred.issues
+          tail += " [%s]" % ','.join(['#%s' % str(i) for i in issues])
         else: # Limit listing by requested target milestone(s).
           filter_issues = []
           matches_filter = False
@@ -1317,13 +1329,16 @@ class TestRunner:
           # If any one of them matches the MILESTONE_FILTER then we'll print
           # them all.
           for issue in self.pred.issues:
-            # A safe starting assumption.
+            # Some safe starting assumptions.
             milestone = 'unknown'
+            assigned_to = 'unknown'
             if milestones_dict:
               if milestones_dict.has_key(str(issue)):
-                milestone = milestones_dict[str(issue)]
+                milestone = milestones_dict[str(issue)][0]
+                assigned_to = milestones_dict[str(issue)][1]
 
-            filter_issues.append(str(issue) + '(' + milestone + ')')
+            filter_issues.append(
+              str(issue) + '(' + milestone + '/' + assigned_to + ')')
             pattern = re.compile(options.milestone_filter)
             if pattern.match(milestone):
               matches_filter = True
@@ -1331,9 +1346,12 @@ class TestRunner:
           # Did at least one of the associated issues meet our filter?
           if matches_filter:
             issues = filter_issues
-
-        tail += " [%s]" % ','.join(['#%s' % str(i) for i in issues])
-
+          # Wrap the issue#/target-milestone/assigned-to string
+          # to the next line and add a line break to enhance
+          # readability.
+          tail += "\n               %s" % '\n               '.join(
+            ['#%s' % str(i) for i in issues])
+          tail += '\n'
       # If there is no filter or this test made it through
       # the filter then print it!
       if options.milestone_filter is None or len(issues):
@@ -1670,7 +1688,12 @@ def run_tests(test_list, serial_only = F
 
   sys.exit(execute_tests(test_list, serial_only))
 
-def get_target_milestones_for_issues(issue_numbers):
+def get_issue_details(issue_numbers):
+  """For each issue number in ISSUE_NUMBERS query the issue
+     tracker and determine what the target milestone is and
+     who the issue is assigned to.  Return this information
+     as a dictionary mapping issue numbers to a list
+     [target_milestone, assigned_to]"""
   xml_url = "http://subversion.tigris.org/issues/xml.cgi?id="
   issue_dict = {}
 
@@ -1698,14 +1721,17 @@ def get_target_milestones_for_issues(iss
     xmldoc = xml.dom.minidom.parse(issue_xml_f)
     issue_xml_f.close()
 
-    # Get the target milestone for each issue.
+    # For each issue: Get the target milestone and who
+    #                 the issue is assigned to.
     issue_element = xmldoc.getElementsByTagName('issue')
     for i in issue_element:
       issue_id_element = i.getElementsByTagName('issue_id')
       issue_id = issue_id_element[0].childNodes[0].nodeValue
       milestone_element = i.getElementsByTagName('target_milestone')
       milestone = milestone_element[0].childNodes[0].nodeValue
-      issue_dict[issue_id] = milestone
+      assignment_element = i.getElementsByTagName('assigned_to')
+      assignment = assignment_element[0].childNodes[0].nodeValue
+      issue_dict[issue_id] = [milestone, assignment]
   except:
     print "ERROR: Unable to parse target milestones from issue tracker"
     raise
@@ -1898,10 +1924,13 @@ def execute_tests(test_list, serial_only
                 options.mode_filter.upper() == test_mode or
                 (options.mode_filter.upper() == 'PASS' and test_mode == '')):
               issues_dict[issue]=issue
-      milestones_dict = get_target_milestones_for_issues(issues_dict.keys())
+      milestones_dict = get_issue_details(issues_dict.keys())
+
+    header = "Test #  Mode   Test Description\n"
+    if options.milestone_filter:
+      header += "               Issue#(Target Milestone/Assigned To)\n"
+    header += "------  -----  ----------------"
 
-    header = "Test #  Mode   Test Description\n" \
-             "------  -----  ----------------"
     printed_header = False
     for testnum in testnums:
       test_mode = TestRunner(test_list[testnum], testnum).get_mode().upper()

Modified: subversion/branches/ev2-export/subversion/tests/cmdline/update_tests.py
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/subversion/tests/cmdline/update_tests.py?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/subversion/tests/cmdline/update_tests.py (original)
+++ subversion/branches/ev2-export/subversion/tests/cmdline/update_tests.py Fri Nov  2 00:57:09
2012
@@ -5514,9 +5514,12 @@ def update_to_HEAD_plus_1(sbox):
   sbox.build(read_only = True)
   wc_dir = sbox.wc_dir
 
+  # Attempt the update, expecting an error.  (Sometimes the error
+  # string says "No such revision", sometimes "No such target
+  # revision".)
   svntest.actions.run_and_verify_update(wc_dir,
                                         None, None, None,
-                                        ".*No such revision",
+                                        "E160006.*No such.*revision",
                                         None, None,
                                         None, None, None, wc_dir, '-r', '2')
 

Modified: subversion/branches/ev2-export/subversion/tests/libsvn_subr/named_atomic-test.c
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/subversion/tests/libsvn_subr/named_atomic-test.c?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/subversion/tests/libsvn_subr/named_atomic-test.c (original)
+++ subversion/branches/ev2-export/subversion/tests/libsvn_subr/named_atomic-test.c Fri Nov
 2 00:57:09 2012
@@ -112,6 +112,20 @@ proc_found(const char *proc, apr_pool_t 
   return result == svn_tristate_true;
 }
 
+/* Remove temporary files from disk.
+ */
+static apr_status_t
+cleanup_test_shm(void *arg)
+{
+  apr_pool_t *pool = arg;
+  
+  svn_error_clear(svn_atomic_namespace__cleanup(name_namespace, pool));
+  svn_error_clear(svn_atomic_namespace__cleanup(name_namespace1, pool));
+  svn_error_clear(svn_atomic_namespace__cleanup(name_namespace2, pool));
+
+  return 0;
+}
+
 /* Bring shared memory to a defined state. This is very useful in case of
  * lingering problems from previous tests or test runs.
  */
@@ -150,6 +164,11 @@ init_test_shm(apr_pool_t *pool)
     return svn_error_wrap_apr(SVN_ERR_TEST_SKIPPED,
                               "user has insufficient privileges");
 
+  /* destroy temp files after usage */
+
+  apr_pool_cleanup_register(pool, pool,
+                            cleanup_test_shm, apr_pool_cleanup_null);
+
   /* get the two I/O atomics for this thread */
   SVN_ERR(svn_atomic_namespace__create(&ns, name_namespace, scratch));
   SVN_ERR(svn_named_atomic__get(&atomic, ns, ATOMIC_NAME, TRUE));

Modified: subversion/branches/ev2-export/tools/client-side/svn-bench/null-log-cmd.c
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/tools/client-side/svn-bench/null-log-cmd.c?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/tools/client-side/svn-bench/null-log-cmd.c (original)
+++ subversion/branches/ev2-export/tools/client-side/svn-bench/null-log-cmd.c Fri Nov  2 00:57:09
2012
@@ -215,26 +215,28 @@ svn_cl__null_log(apr_getopt_t *os,
                           pool));
 
   if (!opt_state->quiet)
-    if (opt_state->use_merge_history)
-      SVN_ERR(svn_cmdline_printf(pool,
-                                _("%15s revisions, %15s merged in %s merges\n"
-                                  "%15s msg lines, %15s in merged revisions\n"
-                                  "%15s changes,   %15s in merged revisions\n"),
-                                svn__ui64toa_sep(lb.revisions, ',', pool),
-                                svn__ui64toa_sep(lb.merged_revs, ',', pool),
-                                svn__ui64toa_sep(lb.merges, ',', pool),
-                                svn__ui64toa_sep(lb.message_lines, ',', pool),
-                                svn__ui64toa_sep(lb.merged_message_lines, ',', pool),
-                                svn__ui64toa_sep(lb.changes, ',', pool),
-                                svn__ui64toa_sep(lb.merged_changes, ',', pool)));
-    else
-      SVN_ERR(svn_cmdline_printf(pool,
-                                _("%15s revisions\n"
-                                  "%15s msg lines\n"
-                                  "%15s changes\n"),
-                                svn__ui64toa_sep(lb.revisions, ',', pool),
-                                svn__ui64toa_sep(lb.message_lines, ',', pool),
-                                svn__ui64toa_sep(lb.changes, ',', pool)));
+    {
+      if (opt_state->use_merge_history)
+        SVN_ERR(svn_cmdline_printf(pool,
+                      _("%15s revisions, %15s merged in %s merges\n"
+                        "%15s msg lines, %15s in merged revisions\n"
+                        "%15s changes,   %15s in merged revisions\n"),
+                      svn__ui64toa_sep(lb.revisions, ',', pool),
+                      svn__ui64toa_sep(lb.merged_revs, ',', pool),
+                      svn__ui64toa_sep(lb.merges, ',', pool),
+                      svn__ui64toa_sep(lb.message_lines, ',', pool),
+                      svn__ui64toa_sep(lb.merged_message_lines, ',', pool),
+                      svn__ui64toa_sep(lb.changes, ',', pool),
+                      svn__ui64toa_sep(lb.merged_changes, ',', pool)));
+      else
+        SVN_ERR(svn_cmdline_printf(pool,
+                      _("%15s revisions\n"
+                        "%15s msg lines\n"
+                        "%15s changes\n"),
+                      svn__ui64toa_sep(lb.revisions, ',', pool),
+                      svn__ui64toa_sep(lb.message_lines, ',', pool),
+                      svn__ui64toa_sep(lb.changes, ',', pool)));
+    }
 
   return SVN_NO_ERROR;
 }

Modified: subversion/branches/ev2-export/tools/dev/benchmarks/suite1/benchmark.py
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/tools/dev/benchmarks/suite1/benchmark.py?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/tools/dev/benchmarks/suite1/benchmark.py (original)
+++ subversion/branches/ev2-export/tools/dev/benchmarks/suite1/benchmark.py Fri Nov  2 00:57:09
2012
@@ -17,25 +17,52 @@
 # specific language governing permissions and limitations
 # under the License.
 
-"""Usage: benchmark.py run|list|compare|show|chart ...
+"""Usage: benchmark.py run|list|compare|show|chart <selection> ...
+
+SELECTING TIMINGS -- B@R,LxS
+ 
+In the subcommands below, a timings selection consists of a string with up to
+four elements:
+  <branch>@<revision>,<levels>x<spread> 
+abbreviated as:
+  B@R,LxS
+
+<branch> is a label of an svn branch, e.g. "1.7.x".
+<revision> is the last-changed-revision of above branch.
+<levels> is the number of directory levels created in the benchmark.
+<spread> is the number of child trees spreading off each dir level.
+
+<branch> and <revision> are simply used for labeling. Upon the actual
+test runs, you should enter labels matching the selected --svn-bin-dir.
+Later, you can select runs individually by using these labels.
+
+For <revision>, you can provide special keywords:
+- 'each' has the same effect as entering each available revision number that
+  is on record in the db in a separate timings selection.
+- 'last' is the same as 'each', but shows only the last 10 revisions. 'last'
+  can be combined with a number, e.g. 'last12'.
+
+For all subcommands except 'run', you can omit some or all of the elements of
+a timings selection to combine all available timings sets. Try that out with
+the 'list' subcommand.
+
+Examples:
+  benchmark.py run 1.7.x@12345,5x5
+  benchmark.py show trunk@12345
+  benchmark.py compare 1.7.0,1x100 trunk@each,1x100
+  benchmark.py chart compare 1.7.0,5x5 trunk@last12,5x5
+
 
 RUN BENCHMARKS
 
-  benchmark.py run <branch>@<revision>,<levels>x<spread> [N] [options]
+  benchmark.py run B@R,LxS [N] [options]
 
 Test data is added to an sqlite database created automatically, by default
 'benchmark.db' in the current working directory. To specify a different path,
 use option -f <path_to_db>.
 
-<branch_name> is a label of the svn branch you're testing, e.g. "1.7.x".
-<revision> is the last-changed-revision of above branch.
-<levels> is the number of directory levels to create
-<spread> is the number of child trees spreading off each dir level
 If <N> is provided, the run is repeated N times.
 
-<branch_name> and <revision> are simply used for later reference. You
-should enter labels matching the selected --svn-bin-dir.
-
 <levels> and <spread> control the way the tested working copy is structured:
   <levels>: number of directory levels to create.
   <spread>: number of files and subdirectories created in each dir.
@@ -43,22 +70,21 @@ should enter labels matching the selecte
 
 LIST WHAT IS ON RECORD
 
-  benchmark.py list [ <branch>@<rev>,<levels>x<spread> ]
+  benchmark.py list [B@R,LxS]
 
 Find entries in the database for the given constraints. Any arguments can
 be omitted. (To select only a rev, start with a '@', like '@123'; to select
 only spread, start with an 'x', like "x100".)
 
-Omit all args to get a listing of all available distinct entries.
+Call without arguments to get a listing of all available constraints.
 
 
 COMPARE TIMINGS
 
-  benchmark.py compare B@R,LxS B@R,LxS
+  benchmark.py compare B@R,LxS B@R,LxS [B@R,LxS [...]]
 
-Compare two kinds of timings (in text mode). Each B@R,LxS selects
-timings from branch, revision, WC-levels and -spread by the same labels as
-previously given for a 'run' call. Any elements can be omitted. For example:
+Compare any number of timings sets to the first provided set (in text mode).
+For example:
   benchmark.py compare 1.7.0 trunk@1349903
     Compare the total timings of all combined '1.7.0' branch runs to
     all combined runs of 'trunk'-at-revision-1349903.
@@ -66,38 +92,38 @@ previously given for a 'run' call. Any e
     Same as above, but only compare the working copy types with 5 levels
     and a spread of 5.
 
+Use the -c option to limit comparison to specific command names.
+
 
 SHOW TIMINGS
 
-  benchmark.py show <branch>@<rev>,<levels>x<spread>
+  benchmark.py show B@R,LxS [B@R,LxS [...]]
 
 Print out a summary of the timings selected from the given constraints.
-Any arguments can be omitted (like for the 'list' command).
 
 
 GENERATE CHARTS
 
   benchmark.py chart compare B@R,LxS B@R,LxS [ B@R,LxS ... ]
 
-Produce a bar chart that compares any number of sets of timings. Timing sets
-are supplied by B@R,LxS arguments (i.e. <branch>@<rev>,<levels>x<spread>
as
-provided for a 'run' call), where any number of elements may be omitted. The
-less constraints you supply, the more timings are included (try it out with
-the 'list' command). The first set is taken as a reference point for 100% and
-+0 seconds. Each following dataset produces a set of labeled bar charts.
-So, at least two constraint arguments must be provided.
+Produce a bar chart that compares any number of sets of timings.  Like with
+the plain 'compare' command, the first set is taken as a reference point for
+100% and +-0 seconds. Each following dataset produces a set of labeled bar
+charts, grouped by svn command names. At least two timings sets must be
+provided.
 
-Use the -c option to limit charts to specific command names.
+Use the -c option to limit comparison to specific command names.
 
 
 EXAMPLES
 
-# Run 3 benchmarks on svn 1.7.0. Timings are saved in benchmark.db.
+# Run 3 benchmarks on svn 1.7.0 with 5 dir levels and 5 files and subdirs for
+# each level (spread). Timings are saved in ./benchmark.db.
 # Provide label '1.7.0' and its Last-Changed-Rev for later reference.
-# (You may also set your $PATH instead of using --svn-bin-dir.)
 ./benchmark.py run --svn-bin-dir ~/svn-prefix/1.7.0/bin 1.7.0@1181106,5x5 3
 
 # Record 3 benchmark runs on trunk, again naming its Last-Changed-Rev.
+# (You may also set your $PATH instead of using --svn-bin-dir.)
 ./benchmark.py run --svn-bin-dir ~/svn-prefix/trunk/bin trunk@1352725,5x5 3
 
 # Work with the results of above two runs
@@ -129,12 +155,18 @@ import random
 import shutil
 import stat
 import string
+from copy import copy
 
 IGNORE_COMMANDS = ('--version', )
 TOTAL_RUN = 'TOTAL RUN'
 
 j = os.path.join
 
+def bail(msg=None):
+  if msg:
+    print msg
+  exit(1)
+
 def time_str():
   return time.strftime('%Y-%m-%d %H:%M:%S');
 
@@ -227,6 +259,7 @@ class RunKind:
     if self.levels: self.levels = int(self.levels)
     if self.spread: self.spread = int(self.spread)
 
+  def label(self):
     label_parts = []
     if self.branch:
       label_parts.append(self.branch)
@@ -240,12 +273,52 @@ class RunKind:
       if self.spread:
         label_parts.append(RUN_KIND_SEPARATORS[2])
         label_parts.append(str(self.spread))
-    self.label = ''.join(label_parts)
+    return ''.join(label_parts)
 
   def args(self):
     return (self.branch, self.revision, self.levels, self.spread)
 
 
+def parse_timings_selections(db, *args):
+  run_kinds = []
+
+  for arg in args:
+    run_kind = RunKind(arg)
+
+    if run_kind.revision == 'each':
+      run_kind.revision = None
+      query = TimingQuery(db, run_kind)
+      for revision in query.get_sorted_revisions():
+        revision_run_kind = copy(run_kind)
+        revision_run_kind.revision = revision
+        run_kinds.append(revision_run_kind)
+    elif run_kind.revision and run_kind.revision.startswith('last'):
+      Nstr = run_kind.revision[4:]
+      if not Nstr:
+        N = 10
+      else:
+        N = int(Nstr)
+      run_kind.revision = None
+      query = TimingQuery(db, run_kind)
+      for revision in query.get_sorted_revisions()[-N:]:
+        revision_run_kind = copy(run_kind)
+        revision_run_kind.revision = revision
+        run_kinds.append(revision_run_kind)
+    else:
+      run_kinds.append(run_kind)
+
+  return run_kinds
+  
+def parse_one_timing_selection(db, *args):
+  run_kinds = parse_timings_selections(db, *args)
+  if len(run_kinds) != 1:
+    bail("I need exactly one timings identifier, not '%s'"
+         % (' '.join(*args)))
+  return run_kinds[0]
+
+
+
+
 PATHNAME_VALID_CHARS = "-_.,@%s%s" % (string.ascii_letters, string.digits)
 def filesystem_safe_string(s):
   return ''.join(c for c in s if c in PATHNAME_VALID_CHARS)
@@ -436,15 +509,19 @@ class TimingQuery:
               AND b.batch_id = r.batch_id
               AND r.aborted = 0
          """
-    self.append_constraint('k', 'branch', run_kind.branch)
-    self.append_constraint('k', 'revision', run_kind.revision)
-    self.append_constraint('k', 'wc_levels', run_kind.levels)
-    self.append_constraint('k', 'wc_spread', run_kind.spread)
-    self.label = run_kind.label
+    self.append_constraint('k.branch', run_kind.branch)
+    self.each_revision = False
+    if run_kind.revision == 'each':
+      self.each_revision = True
+    else:
+      self.append_constraint('k.revision', run_kind.revision)
+    self.append_constraint('k.wc_levels', run_kind.levels)
+    self.append_constraint('k.wc_spread', run_kind.spread)
+    self.label = run_kind.label()
 
-  def append_constraint(self, table, name, val):
+  def append_constraint(self, column_name, val):
     if val:
-      self.constraints.append('AND %s.%s = ?' % (table, name))
+      self.constraints.append('AND %s = ?' % column_name)
       self.values.append(val)
 
   def remove_last_constraint(self):
@@ -458,7 +535,6 @@ class TimingQuery:
     query.append('ORDER BY %s' % x)
     c = db.conn.cursor()
     try:
-      #print ' '.join(query)
       c.execute(' '.join(query), self.values)
       if n == 1:
         return [tpl[0] for tpl in c.fetchall()]
@@ -500,7 +576,7 @@ class TimingQuery:
                   max(t.timing),
                   avg(t.timing)""",
              self.FROM_WHERE ]
-    self.append_constraint('t', 'command', command)
+    self.append_constraint('t.command', command)
     try:
       query.extend(self.constraints)
       c = db.conn.cursor()
@@ -816,11 +892,12 @@ def perform_run(batch, run_kind,
 
     
 def cmdline_run(db, options, run_kind_str, N=1):
-  run_kind = RunKind(run_kind_str)
+  run_kind = parse_one_timing_selection(db, run_kind_str)
+    
   N = int(N)
 
   print 'Hi, going to run a Subversion benchmark series of %d runs...' % N
-  print 'Label is %s' % run_kind.label
+  print 'Label is %s' % run_kind.label()
 
   # can we run the svn binaries?
   svn_bin = j(options.svn_bin_dir, 'svn')
@@ -829,8 +906,7 @@ def cmdline_run(db, options, run_kind_st
   for b in (svn_bin, svnadmin_bin):
     so,se = run_cmd([b, '--version'])
     if not so:
-      print "Can't run", b
-      exit(1)
+      bail("Can't run %s" % b)
 
     print ', '.join([s.strip() for s in so.split('\n')[:2]])
 
@@ -844,54 +920,55 @@ def cmdline_run(db, options, run_kind_st
   batch.done()
 
 
-def cmdline_list(db, options, run_kind_str=None):
-  run_kind = RunKind(run_kind_str)
+def cmdline_list(db, options, *args):
+  run_kinds = parse_timings_selections(db, *args)
 
-  constraints = []
-  def add_if_not_none(name, val):
-    if val:
-      constraints.append('  %s = %s' % (name, val))
-  add_if_not_none('branch', run_kind.branch)
-  add_if_not_none('revision', run_kind.revision)
-  add_if_not_none('levels', run_kind.levels)
-  add_if_not_none('spread', run_kind.spread)
-  if constraints:
-    print 'For\n', '\n'.join(constraints)
-  print 'I found:'
+  for run_kind in run_kinds:
 
-  d = TimingQuery(db, run_kind)
-  
-  cmd_names = d.get_sorted_command_names()
-  if cmd_names:
-    print '\n%d command names:\n ' % len(cmd_names), '\n  '.join(cmd_names)
-
-  branches = d.get_sorted_branches()
-  if branches and (len(branches) > 1 or branches[0] != run_kind.branch):
-    print '\n%d branches:\n ' % len(branches), '\n  '.join(branches)
-
-  revisions = d.get_sorted_revisions()
-  if revisions and (len(revisions) > 1 or revisions[0] != run_kind.revision):
-    print '\n%d revisions:\n ' % len(revisions), '\n  '.join(revisions)
-
-  levels_spread = d.get_sorted_levels_spread()
-  if levels_spread and (
-       len(levels_spread) > 1
-       or levels_spread[0] != (run_kind.levels, run_kind.spread)):
-    print '\n%d kinds of levels x spread:\n ' % len(levels_spread), '\n  '.join(
-            [ ('%dx%d' % (l, s)) for l,s in levels_spread ])
+    constraints = []
+    def add_if_not_none(name, val):
+      if val:
+        constraints.append('  %s = %s' % (name, val))
+    add_if_not_none('branch', run_kind.branch)
+    add_if_not_none('revision', run_kind.revision)
+    add_if_not_none('levels', run_kind.levels)
+    add_if_not_none('spread', run_kind.spread)
+    if constraints:
+      print 'For\n', '\n'.join(constraints)
+    print 'I found:'
 
-  print "\n%d runs in %d batches.\n" % (d.count_runs_batches())
+    d = TimingQuery(db, run_kind)
+    
+    cmd_names = d.get_sorted_command_names()
+    if cmd_names:
+      print '\n%d command names:\n ' % len(cmd_names), '\n  '.join(cmd_names)
+
+    branches = d.get_sorted_branches()
+    if branches and (len(branches) > 1 or branches[0] != run_kind.branch):
+      print '\n%d branches:\n ' % len(branches), '\n  '.join(branches)
+
+    revisions = d.get_sorted_revisions()
+    if revisions and (len(revisions) > 1 or revisions[0] != run_kind.revision):
+      print '\n%d revisions:\n ' % len(revisions), '\n  '.join(revisions)
+
+    levels_spread = d.get_sorted_levels_spread()
+    if levels_spread and (
+         len(levels_spread) > 1
+         or levels_spread[0] != (run_kind.levels, run_kind.spread)):
+      print '\n%d kinds of levels x spread:\n ' % len(levels_spread), '\n  '.join(
+              [ ('%dx%d' % (l, s)) for l,s in levels_spread ])
 
+    print "\n%d runs in %d batches.\n" % (d.count_runs_batches())
 
-def cmdline_show(db, options, *run_kind_strings):
-  for run_kind_str in run_kind_strings:
-    run_kind = RunKind(run_kind_str)
 
+def cmdline_show(db, options, *run_kind_strings):
+  run_kinds = parse_timings_selections(db, *run_kind_strings)
+  for run_kind in run_kinds:
     q = TimingQuery(db, run_kind)
     timings = q.get_timings()
 
     s = []
-    s.append('Timings for %s' % run_kind.label)
+    s.append('Timings for %s' % run_kind.label())
     s.append('   N    min     max     avg   operation  (unit is seconds)')
 
     for command_name in q.get_sorted_command_names():
@@ -909,96 +986,108 @@ def cmdline_show(db, options, *run_kind_
     print '\n'.join(s)
 
 
-def cmdline_compare(db, options, left_str, right_str):
-  left_kind = RunKind(left_str)
-  right_kind = RunKind(right_str)
+def cmdline_compare(db, options, *args):
+  run_kinds = parse_timings_selections(db, *args)
+  if len(run_kinds) < 2:
+    bail("Need at least two sets of timings to compare.")
+
 
+  left_kind = run_kinds[0]
   leftq = TimingQuery(db, left_kind)
   left = leftq.get_timings()
   if not left:
-    print "No timings for", left_kind.label
-    exit(1)
+    bail("No timings for %s" % left_kind.label())
 
-  rightq = TimingQuery(db, right_kind)
-  right = rightq.get_timings()
-  if not right:
-    print "No timings for", right_kind.label
-    exit(1)
-
-  label = 'Compare %s to %s' % (left_kind.label, right_kind.label)
-
-  s = [label]
-
-  verbose = options.verbose
-  if not verbose:
-    s.append('       N        avg         operation')
-  else:
-    s.append('       N        min              max              avg         operation')
+  for run_kind_idx in range(1, len(run_kinds)):
+    right_kind = run_kinds[run_kind_idx]
 
-  command_names = [name for name in leftq.get_sorted_command_names()
-                   if name in right]
-  if options.command_names:
-    command_names = [name for name in command_names
-                     if name in options.command_names]
+    rightq = TimingQuery(db, right_kind)
+    right = rightq.get_timings()
+    if not right:
+      print "No timings for %s" % right_kind.label()
+      continue
 
-  for command_name in command_names:
-    left_N, left_min, left_max, left_avg = left[command_name]
-    right_N, right_min, right_max, right_avg = right[command_name]
-
-    N_str = '%s/%s' % (n_label(left_N), n_label(right_N))
-    avg_str = '%7.2f|%+7.3f' % (do_div(left_avg, right_avg),
-                                do_diff(left_avg, right_avg))
+    label = 'Compare %s to %s' % (right_kind.label(), left_kind.label())
 
+    s = [label]
+
+    verbose = options.verbose
     if not verbose:
-      s.append('%9s %-16s  %s' % (N_str, avg_str, command_name))
+      s.append('       N        avg         operation')
     else:
-      min_str = '%7.2f|%+7.3f' % (do_div(left_min, right_min),
-                                  do_diff(left_min, right_min))
-      max_str = '%7.2f|%+7.3f' % (do_div(left_max, right_max),
-                                  do_diff(left_max, right_max))
-
-      s.append('%9s %-16s %-16s %-16s  %s' % (N_str, min_str, max_str, avg_str,
-                                          command_name))
-
-  s.extend([
-    '(legend: "1.23|+0.45" means: slower by factor 1.23 and by 0.45 seconds;',
-    ' factor < 1 and seconds < 0 means \'%s\' is faster.'
-    % right_kind.label,
-    ' "2/3" means: \'%s\' has 2 timings on record, the other has 3.)'
-    % left_kind.label
-    ])
+      s.append('       N        min              max              avg         operation')
 
+    command_names = [name for name in leftq.get_sorted_command_names()
+                     if name in right]
+    if options.command_names:
+      command_names = [name for name in command_names
+                       if name in options.command_names]
 
-  print '\n'.join(s)
+    for command_name in command_names:
+      left_N, left_min, left_max, left_avg = left[command_name]
+      right_N, right_min, right_max, right_avg = right[command_name]
+
+      N_str = '%s/%s' % (n_label(left_N), n_label(right_N))
+      avg_str = '%7.2f|%+7.3f' % (do_div(left_avg, right_avg),
+                                  do_diff(left_avg, right_avg))
+
+      if not verbose:
+        s.append('%9s %-16s  %s' % (N_str, avg_str, command_name))
+      else:
+        min_str = '%7.2f|%+7.3f' % (do_div(left_min, right_min),
+                                    do_diff(left_min, right_min))
+        max_str = '%7.2f|%+7.3f' % (do_div(left_max, right_max),
+                                    do_diff(left_max, right_max))
+
+        s.append('%9s %-16s %-16s %-16s  %s' % (N_str, min_str, max_str, avg_str,
+                                            command_name))
+
+    s.extend([
+      '(legend: "1.23|+0.45" means: slower by factor 1.23 and by 0.45 seconds;',
+      ' factor < 1 and seconds < 0 means \'%s\' is faster.'
+      % right_kind.label(),
+      ' "2/3" means: \'%s\' has 2 timings on record, the other has 3.)'
+      % left_kind.label()
+      ])
+
+
+    print '\n'.join(s)
 
 
 # ------------------------------------------------------- charts
 
 def cmdline_chart_compare(db, options, *args):
+  import matplotlib
+  matplotlib.use('Agg')
   import numpy as np
-  import matplotlib.pyplot as plt
+  import matplotlib.pylab as plt
 
   labels = []
   timing_sets = []
   command_names = None
 
-  for arg in args:
-    run_kind = RunKind(arg)
+  run_kinds = parse_timings_selections(db, *args)
+
+  # iterate the timings selections and accumulate data
+  for run_kind in run_kinds:
     query = TimingQuery(db, run_kind)
     timings = query.get_timings()
     if not timings:
-      print "No timings for", run_kind.label
-      exit(1)
-    labels.append(run_kind.label)
+      print "No timings for %s" % run_kind.label()
+      continue
+    labels.append(run_kind.label())
     timing_sets.append(timings)
 
-    if command_names:
-      for i in range(len(command_names)):
-        if not command_names[i] in timings:
-          del command_names[i]
-    else:
+    # it only makes sense to compare those commands that have timings
+    # in the first selection, because that is the one everything else
+    # is compared to. Remember the first selection's command names.
+    if not command_names:
       command_names = query.get_sorted_command_names()
 
+
+  if len(timing_sets) < 2:
+    bail("Not enough timings")
+
   if options.command_names:
     command_names = [name for name in command_names
                      if name in options.command_names]
@@ -1009,99 +1098,125 @@ def cmdline_chart_compare(db, options, *
       [ filesystem_safe_string(l) for l in labels ]
       ) + '.svg'
                   
-  print '\nwriting chart file:', chart_path
-
   N = len(command_names)
   M = len(timing_sets) - 1
+  if M < 2:
+    M = 2
 
-  ind = np.arange(N)  # the x locations for the groups
-  width = 1. / (1.2 + M)     # the width of the bars
-  dist = 0.15
-
-  fig = plt.figure(figsize=(0.33*N*M,12))
-  plot1 = fig.add_subplot(211)
-  plot2 = fig.add_subplot(212)
-
-  # invisible lines that make sure the scale doesn't get minuscule
-  plot1.axhline(y=101, color='white', linewidth=0.01)
-  plot1.axhline(y=95.0, color='white', linewidth=0.01)
-  plot2.axhline(y=0.1, color='white', linewidth=0.01)
-  plot2.axhline(y=-0.5, color='white', linewidth=0.01)
-
-  reference = timing_sets[0]
-
-  ofs = 0
-
-  for label_i in range(1, len(labels)):
-    timings = timing_sets[label_i]
-    divs = []
-    diffs = []
-    divs_color = []
-    deviations = []
-    for command_name in command_names:
-      ref_N, ref_min, ref_max, ref_avg = reference[command_name]
-      this_N, this_min, this_max, this_avg = timings[command_name]
+  group_positions = np.arange(N)  # the y locations for the groups
+  dist = 1. / (1. + M)
+  height = (1. - dist) / M     # the height of the bars
+
+  fig = plt.figure(figsize=(12, 5 + 0.2*N*M))
+  plot1 = fig.add_subplot(121)
+  plot2 = fig.add_subplot(122)
+
+  left = timing_sets[0]
+
+  # Iterate timing sets. Each loop produces one bar for each command name
+  # group.
+  for label_i,label in enumerate(labels[1:],1):
+    right = timing_sets[label_i]
+    if not right:
+      continue
+
+    for cmd_i, command_name in enumerate(command_names):
+      if command_name not in right:
+        # skip commands that have no timings in this selection
+        continue
+
+      left_N, left_min, left_max, left_avg = left[command_name]
+      right_N, right_min, right_max, right_avg = right[command_name]
 
-      val = 100. * (do_div(ref_avg, this_avg) - 1.0)
-      if val < 0:
+      div_avg = 100. * (do_div(left_avg, right_avg) - 1.0)
+      if div_avg <= 0:
         col = '#55dd55'
       else:
         col = '#dd5555'
-      divs.append(val)
-      divs_color.append(col)
-      diffs.append( do_diff(ref_avg, this_avg) )
-      deviations.append(this_max / this_min)
-
-    rects = plot1.bar(ind + ofs, divs, width * (1.0 - dist),
-                      color=divs_color, bottom=100.0, edgecolor='none')
-
-    for i in range(len(rects)):
-      x = rects[i].get_x() + width / 2.2
-      div = divs[i]
-      label = labels[label_i]
-
-      plot1.text(x, 100.,
-                 ' %+5.1f%% %s' % (div,label),
-                 ha='center', va='top', size='small',
-                 rotation=-90, family='monospace')
-
-    rects = plot2.bar(ind + ofs, diffs, width * 0.9,
-                   color=divs_color, bottom=0.0, edgecolor='none')
-
-    for i in range(len(rects)):
-      x = rects[i].get_x() + width / 2.2
-      diff = diffs[i]
-      label = labels[label_i]
-
-      plot2.text(x, 0.,
-                 ' %+5.2fs %s' % (diff,label),
-                 ha='center', va='top', size='small',
-                 rotation=-90, family='monospace')
-
-    ofs += width
-
-  plot1.set_title('Speed change compared to %s [%%]' % labels[0])
-  plot1.set_xticks(ind + (width / 2.))
-  plot1.set_xticklabels(command_names, rotation=-55,
-                        horizontalalignment='left',
-                        size='x-small', weight='bold')
-  plot1.axhline(y=100.0, color='#555555', linewidth=0.2)
-  plot2.set_title('[seconds]')
-  plot2.set_xticks(ind + (width / 2.))
-  plot2.set_xticklabels(command_names, rotation=-55,
-                        horizontalalignment='left',
-                        size='medium', weight='bold')
-  plot2.axhline(y=0.0, color='#555555', linewidth=0.2)
-
-  margin = 1.5/(N*M)
-  fig.subplots_adjust(bottom=0.1, top=0.97,
-                      left=margin,
-                      right=1.0-(margin / 2.))
 
-  #plot1.legend( (rects1[0], rects2[0]), (left_label, right_label) )
+      diff_val = do_diff(left_avg, right_avg)
+
+      ofs = (dist + height) / 2. + height * (label_i - 1)
+
+      barheight = height * (1.0 - dist)
+
+      y = float(cmd_i) + ofs
+
+      plot1.barh((y, ),
+                 (div_avg, ),
+                 barheight,
+                 color=col, edgecolor='white')
+      plot1.text(0., y + height/2.,
+                 '%s %+5.1f%%' % (label, div_avg),
+                 ha='right', va='center', size='small',
+                 rotation=0, family='monospace')
+
+      plot2.barh((y, ),
+                 (diff_val, ),
+                 barheight,
+                 color=col, edgecolor='white')
+      plot2.text(0., y + height/2.,
+                 '%s %+6.2fs' % (label, diff_val),
+                 ha='right', va='center', size='small',
+                 rotation=0, family='monospace')
+
+
+  for p in (plot1, plot2):
+    xlim = list(p.get_xlim())
+    if xlim[1] < 10.:
+      xlim[1] = 10.
+    # make sure the zero line is far enough right so that the annotations
+    # fit inside the chart. About half the width should suffice.
+    if xlim[0] > -xlim[1]:
+      xlim[0] = -xlim[1]
+    p.set_xlim(*xlim)
+    p.set_xticks((0,))
+    p.set_yticks(group_positions + (height / 2.))
+    p.set_yticklabels(())
+    p.set_ylim((len(command_names), 0))
+    p.grid()
+
+  plot1.set_xticklabels(('+-0%',), rotation=0)
+  plot1.set_title('Average runtime change from %s in %%' % labels[0],
+                  size='medium')
+
+  plot2.set_xticklabels(('+-0s',), rotation=0)
+  plot2.set_title('Average runtime change from %s in seconds' % labels[0],
+                  size='medium')
+
+  margin = 1./(2 + N*M)
+  titlemargin = 0
+  if options.title:
+    titlemargin = margin * 1.5
+
+  fig.subplots_adjust(left=0.005, right=0.995, wspace=0.3, bottom=margin,
+                      top=1.0-margin-titlemargin)
+
+  ystep = (1.0 - 2.*margin - titlemargin) / len(command_names)
+
+  for idx,command_name in enumerate(command_names):
+    ylabel = '%s\nvs. %.1fs' % (
+                     command_name,
+                     left[command_name][3])
+
+    ypos=1.0 - margin - titlemargin - ystep/M - ystep * idx
+    plt.figtext(0.5, ypos,
+                command_name,
+                ha='center', va='top',
+                size='medium', weight='bold')
+    plt.figtext(0.5, ypos - ystep/(M+1),
+                '%s\n= %.2fs' % (
+                  labels[0], left[command_name][3]),
+                ha='center', va='top',
+                size='small')
+
+  if options.title:
+    plt.figtext(0.5, 1. - titlemargin/2, options.title, ha='center',
+                va='center', weight='bold')
 
-  #plt.show()
   plt.savefig(chart_path)
+  print 'wrote chart file:', chart_path
+
 
 # ------------------------------------------------------------ main
 
@@ -1142,6 +1257,9 @@ if __name__ == '__main__':
   parser.add_option('-c', '--command-names', action='store',
                     dest='command_names',
                     help='Comma separated list of command names to limit to.')
+  parser.add_option('-t', '--title', action='store',
+                    dest='title',
+                    help='For charts, a title to print in the chart graphics.')
 
   parser.set_description(__doc__)
   parser.set_usage('')
@@ -1154,7 +1272,7 @@ if __name__ == '__main__':
     if msg:
       print
       print msg
-    exit(1)
+    bail()
 
   # there should be at least one arg left: the sub-command
   if not args:

Modified: subversion/branches/ev2-export/tools/dev/benchmarks/suite1/cronjob
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/tools/dev/benchmarks/suite1/cronjob?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/tools/dev/benchmarks/suite1/cronjob (original)
+++ subversion/branches/ev2-export/tools/dev/benchmarks/suite1/cronjob Fri Nov  2 00:57:09 2012
@@ -30,13 +30,7 @@
 # what it does: http://hofmeyr.de/pat/
 
 #EMAILS=your@ema.il add@ress.es
-EMAILS=""
-
-if [ "$USER" = "neels" ]; then
-  # I don't want to keep editing files after every update. ~Neels
-  EMAILS=dev@subversion.apache.org
-fi
-
+EMAILS=dev@subversion.apache.org
 
 echo
 echo "--------------------------------------------------------------------"
@@ -45,12 +39,16 @@ echo
 
 results="$(tempfile)"
 
+benchdir=/home/neels/svnbench
+patbin=/home/neels/bin/pat
+patbase=/home/neels/pat
+
 
 # first update trunk to HEAD and rebuild.
 # update/build is logged to the cronjob log (via stdout)
 
-cd /home/neels/pat/trunk
-/home/neels/bin/pat update
+cd "$patbase/trunk"
+"$patbin" update
 
 if [ "$?" -ne "0" ]; then
   subject="Failed to update to HEAD."
@@ -58,14 +56,14 @@ if [ "$?" -ne "0" ]; then
   echo "$subject"
 else
 
-  rev="$(/home/neels/pat/stable/prefix/bin/svn info /home/neels/pat/trunk/src | grep Revision)"
+  rev="$("$patbase"/stable/prefix/bin/svn info "$patbase"/trunk/src | grep Revision)"
   if [ -z "$rev" ]; then
     subject="Working copy problem."
     echo "$subject" > "$results"
     echo "$subject"
   else
 
-    NONMAINTAINER=1 /home/neels/bin/pat remake
+    NONMAINTAINER=1 "$patbin" remake
     if [ "$?" -ne "0" ]; then
       subject="Failed to build $rev."
       echo "$subject" > "$results"
@@ -76,10 +74,10 @@ else
       # updating and building succeeded!
       # run the benchmark:
 
-      compiled="$(/home/neels/pat/trunk/prefix/bin/svn --version | grep "compiled")"
+      compiled="$("$patbase"/trunk/prefix/bin/svn --version | grep "compiled")"
       subject="$rev$compiled"
 
-      cd /home/neels/svnbench/
+      cd "$benchdir"
 
       # make more or less sure that runs don't leak into each other via
       # I/O caching.
@@ -88,8 +86,8 @@ else
       # basically, just run it. But also, I want to
       # - append output to stdout, for cronjob logging.
       # - send output as mail, but only this run's output less update&build
-      "$(which time)" -p ./run 2>&1 | tee "$results"
-
+      time -p ./run 2>&1 | tee "$results"
+      time -p ./generate_charts 2>&1 | tee -a "$results"
     fi
   fi
 fi

Modified: subversion/branches/ev2-export/tools/dev/benchmarks/suite1/run
URL: http://svn.apache.org/viewvc/subversion/branches/ev2-export/tools/dev/benchmarks/suite1/run?rev=1404846&r1=1404845&r2=1404846&view=diff
==============================================================================
--- subversion/branches/ev2-export/tools/dev/benchmarks/suite1/run (original)
+++ subversion/branches/ev2-export/tools/dev/benchmarks/suite1/run Fri Nov  2 00:57:09 2012
@@ -88,9 +88,11 @@ started="$(date)"
 echo "Started at $started"
 
 echo "
-*Disclaimer* - This tests only file://-URL access on a GNU/Linux VM.
+*DISCLAIMER* - This tests only file://-URL access on a GNU/Linux VM.
 This is intended to measure changes in performance of the local working
-copy layer, *only*. These results are *not* generally true for everyone."
+copy layer, *only*. These results are *not* generally true for everyone.
+
+Charts of this data are available at http://svn-qavm.apache.org/charts/"
 
 if [ -z "$SVNBENCH_SUMMARY_ONLY" ]; then
   batch $al $as $N
@@ -139,6 +141,5 @@ done
 echo ""
 echo "Had started at $started,"
 echo "       done at $(date)"
-pwd
 } 2>&1 | tee results.txt
 

Propchange: subversion/branches/ev2-export/tools/dist/make-deps-tarball.sh
------------------------------------------------------------------------------
  Merged /subversion/branches/auto-props-sdc/tools/dist/make-deps-tarball.sh:r1384106-1401643
  Merged /subversion/trunk/tools/dist/make-deps-tarball.sh:r1400554-1404840



Mime
View raw message