subversion-commits mailing list archives

From prabh...@apache.org
Subject svn commit: r1489765 [11/22] - in /subversion/branches/verify-keep-going: ./ build/ build/ac-macros/ build/generator/ build/generator/templates/ contrib/hook-scripts/ contrib/server-side/fsfsfixer/ contrib/server-side/fsfsfixer/fixer/ notes/ subversion...
Date Wed, 05 Jun 2013 09:22:51 GMT
Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/dump.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/dump.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/dump.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/dump.c Wed Jun  5 09:22:43 2013
@@ -1097,7 +1097,7 @@ svn_repos_dump_fs3(svn_repos_t *repos,
 {
   const svn_delta_editor_t *dump_editor;
   void *dump_edit_baton = NULL;
-  svn_revnum_t i;
+  svn_revnum_t rev;
   svn_fs_t *fs = svn_repos_fs(repos);
   apr_pool_t *subpool = svn_pool_create(pool);
   svn_revnum_t youngest;
@@ -1129,10 +1129,6 @@ svn_repos_dump_fs3(svn_repos_t *repos,
                              _("End revision %ld is invalid "
                                "(youngest revision is %ld)"),
                              end_rev, youngest);
-  if ((start_rev == 0) && incremental)
-    incremental = FALSE; /* revision 0 looks the same regardless of
-                            whether or not this is an incremental
-                            dump, so just simplify things. */
 
   /* Write out the UUID. */
   SVN_ERR(svn_fs_get_uuid(fs, &uuid, pool));
@@ -1156,10 +1152,9 @@ svn_repos_dump_fs3(svn_repos_t *repos,
     notify = svn_repos_notify_create(svn_repos_notify_dump_rev_end,
                                      pool);
 
-  /* Main loop:  we're going to dump revision i.  */
-  for (i = start_rev; i <= end_rev; i++)
+  /* Main loop:  we're going to dump revision REV.  */
+  for (rev = start_rev; rev <= end_rev; rev++)
     {
-      svn_revnum_t from_rev, to_rev;
       svn_fs_root_t *to_root;
       svn_boolean_t use_deltas_for_rev;
 
@@ -1169,56 +1164,35 @@ svn_repos_dump_fs3(svn_repos_t *repos,
       if (cancel_func)
         SVN_ERR(cancel_func(cancel_baton));
 
-      /* Special-case the initial revision dump: it needs to contain
-         *all* nodes, because it's the foundation of all future
-         revisions in the dumpfile. */
-      if ((i == start_rev) && (! incremental))
-        {
-          /* Special-special-case a dump of revision 0. */
-          if (i == 0)
-            {
-              /* Just write out the one revision 0 record and move on.
-                 The parser might want to use its properties. */
-              SVN_ERR(write_revision_record(stream, fs, 0, subpool));
-              to_rev = 0;
-              goto loop_end;
-            }
-
-          /* Compare START_REV to revision 0, so that everything
-             appears to be added.  */
-          from_rev = 0;
-          to_rev = i;
-        }
-      else
-        {
-          /* In the normal case, we want to compare consecutive revs. */
-          from_rev = i - 1;
-          to_rev = i;
-        }
-
       /* Write the revision record. */
-      SVN_ERR(write_revision_record(stream, fs, to_rev, subpool));
+      SVN_ERR(write_revision_record(stream, fs, rev, subpool));
+
+      /* When dumping revision 0, we just write out the revision record.
+         The parser might want to use its properties. */
+      if (rev == 0)
+        goto loop_end;
 
       /* Fetch the editor which dumps nodes to a file.  Regardless of
          what we've been told, don't use deltas for the first rev of a
          non-incremental dump. */
-      use_deltas_for_rev = use_deltas && (incremental || i != start_rev);
-      SVN_ERR(get_dump_editor(&dump_editor, &dump_edit_baton, fs, to_rev,
+      use_deltas_for_rev = use_deltas && (incremental || rev != start_rev);
+      SVN_ERR(get_dump_editor(&dump_editor, &dump_edit_baton, fs, rev,
                               "", stream, &found_old_reference,
                               &found_old_mergeinfo, NULL,
                               notify_func, notify_baton,
                               start_rev, use_deltas_for_rev, FALSE, subpool));
 
       /* Drive the editor in one way or another. */
-      SVN_ERR(svn_fs_revision_root(&to_root, fs, to_rev, subpool));
+      SVN_ERR(svn_fs_revision_root(&to_root, fs, rev, subpool));
 
       /* If this is the first revision of a non-incremental dump,
          we're in for a full tree dump.  Otherwise, we want to simply
          replay the revision.  */
-      if ((i == start_rev) && (! incremental))
+      if ((rev == start_rev) && (! incremental))
         {
+          /* Compare against revision 0, so everything appears to be added. */
           svn_fs_root_t *from_root;
-          SVN_ERR(svn_fs_revision_root(&from_root, fs, from_rev, subpool));
+          SVN_ERR(svn_fs_revision_root(&from_root, fs, 0, subpool));
           SVN_ERR(svn_repos_dir_delta2(from_root, "", "",
                                        to_root, "",
                                        dump_editor, dump_edit_baton,
@@ -1232,6 +1206,7 @@ svn_repos_dump_fs3(svn_repos_t *repos,
         }
       else
         {
+          /* The normal case: compare consecutive revs. */
           SVN_ERR(svn_repos_replay2(to_root, "", SVN_INVALID_REVNUM, FALSE,
                                     dump_editor, dump_edit_baton,
                                     NULL, NULL, subpool));
@@ -1244,7 +1219,7 @@ svn_repos_dump_fs3(svn_repos_t *repos,
     loop_end:
       if (notify_func)
         {
-          notify->revision = to_rev;
+          notify->revision = rev;
           notify_func(notify_baton, notify, subpool);
         }
     }
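
Taken together, the hunks above reduce the main loop of svn_repos_dump_fs3() to the following shape (a condensed sketch of the patched code, not the verbatim function body). The from_rev/to_rev bookkeeping disappears because the only non-consecutive comparison left is "against r0":

    for (rev = start_rev; rev <= end_rev; rev++)
      {
        svn_pool_clear(subpool);
        if (cancel_func)
          SVN_ERR(cancel_func(cancel_baton));

        /* Every revision gets a revision record, but r0 gets nothing else. */
        SVN_ERR(write_revision_record(stream, fs, rev, subpool));
        if (rev == 0)
          goto loop_end;

        SVN_ERR(svn_fs_revision_root(&to_root, fs, rev, subpool));
        if (rev == start_rev && !incremental)
          /* diff against r0, so the whole tree appears added */;
        else
          /* just replay the changes made in REV */;

      loop_end:
        if (notify_func)
          {
            notify->revision = rev;
            notify_func(notify_baton, notify, subpool);
          }
      }
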
@@ -1507,7 +1482,8 @@ svn_repos_verify_fs3(svn_repos_t *repos,
     }
 
   /* Verify global metadata and backend-specific data first. */
-  err = svn_fs_verify(svn_fs_path(fs, pool), start_rev, end_rev,
+  err = svn_fs_verify(svn_fs_path(fs, pool), svn_fs_config(fs, pool),
+                      start_rev, end_rev,
                       verify_notify, verify_notify_baton,
                       cancel_func, cancel_baton, pool);
 
@@ -1559,34 +1535,6 @@ svn_repos_verify_fs3(svn_repos_t *repos,
             break;
         }
 
-      /* Get cancellable dump editor, but with our close_directory handler. */
-      SVN_ERR(get_dump_editor(&dump_editor, &dump_edit_baton,
-                              fs, rev, "",
-                              svn_stream_empty(iterpool),
-                              NULL, NULL,
-                              verify_close_directory,
-                              notify_func, notify_baton,
-                              start_rev,
-                              FALSE, TRUE, /* use_deltas, verify */
-                              iterpool));
-      SVN_ERR(svn_delta_get_cancellation_editor(cancel_func, cancel_baton,
-                                                dump_editor, dump_edit_baton,
-                                                &cancel_editor,
-                                                &cancel_edit_baton,
-                                                iterpool));
-
-      SVN_ERR(svn_fs_revision_root(&to_root, fs, rev, iterpool));
-      SVN_ERR(svn_fs_verify_root(to_root, iterpool));
-
-      SVN_ERR(svn_repos_replay2(to_root, "", SVN_INVALID_REVNUM, FALSE,
-                                cancel_editor, cancel_edit_baton,
-                                NULL, NULL, iterpool));
-      /* While our editor close_edit implementation is a no-op, we still
-         do this for completeness. */
-      SVN_ERR(cancel_editor->close_edit(cancel_edit_baton, iterpool));
-
-      SVN_ERR(svn_fs_revision_proplist(&props, fs, rev, iterpool));
-
       if (notify_func)
         {
           notify->revision = rev;

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/fs-wrap.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/fs-wrap.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/fs-wrap.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/fs-wrap.c Wed Jun  5 09:22:43 2013
@@ -83,7 +83,7 @@ svn_repos_fs_commit_txn(const char **con
         }
     }
   svn_pool_destroy(iterpool);
-  
+
   /* Commit. */
   err = svn_fs_commit_txn(conflict_p, new_rev, txn, pool);
   if (! SVN_IS_VALID_REVNUM(*new_rev))
@@ -357,7 +357,7 @@ svn_repos_fs_change_rev_prop4(svn_repos_
         action = 'M';
 
       /* Parse the hooks-env file (if any, and if to be used). */
-      if (use_post_revprop_change_hook || use_post_revprop_change_hook)
+      if (use_pre_revprop_change_hook || use_post_revprop_change_hook)
         SVN_ERR(svn_repos__parse_hooks_env(&hooks_env, repos->hooks_env_path,
                                            pool, pool));
 
@@ -409,10 +409,8 @@ svn_repos_fs_revision_prop(svn_string_t 
   else if (readability == svn_repos_revision_access_partial)
     {
       /* Only svn:author and svn:date are fetchable. */
-      if ((strncmp(propname, SVN_PROP_REVISION_AUTHOR,
-                   sizeof(SVN_PROP_REVISION_AUTHOR)-1) != 0)
-          && (strncmp(propname, SVN_PROP_REVISION_DATE,
-                      sizeof(SVN_PROP_REVISION_DATE)-1) != 0))
+      if ((strcmp(propname, SVN_PROP_REVISION_AUTHOR) != 0)
+          && (strcmp(propname, SVN_PROP_REVISION_DATE) != 0))
         *value_p = NULL;
 
       else
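
The switch from strncmp() to strcmp() is more than cosmetic: comparing only sizeof(NAME)-1 bytes treats any longer property name that begins with an allowed name as fetchable. For instance (the first property name below is made up):

    /* SVN_PROP_REVISION_AUTHOR is "svn:author": */
    strncmp("svn:authorized-by", SVN_PROP_REVISION_AUTHOR,
            sizeof(SVN_PROP_REVISION_AUTHOR) - 1);        /* == 0: false match */
    strcmp("svn:authorized-by", SVN_PROP_REVISION_AUTHOR); /* != 0: rejected */
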
@@ -740,7 +738,14 @@ pack_notify_func(void *baton,
   struct pack_notify_baton *pnb = baton;
   svn_repos_notify_t *notify;
 
-  notify = svn_repos_notify_create(pack_action + 3, pool);
+  /* Simple conversion works for these values. */
+  SVN_ERR_ASSERT(pack_action >= svn_fs_pack_notify_start
+                 && pack_action <= svn_fs_pack_notify_end_revprop);
+
+  notify = svn_repos_notify_create(pack_action
+                                   + svn_repos_notify_pack_shard_start
+                                   - svn_fs_pack_notify_start,
+                                   pool);
   notify->shard = shard;
   pnb->notify_func(pnb->notify_baton, notify, pool);
 
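The removed "pack_action + 3" hard-coded the numeric distance between the svn_fs_pack_notify_* and svn_repos_notify_pack_* enum members; deriving the offset from the members themselves keeps the mapping correct as long as both ranges stay contiguous and in the same order, and the new assertion pins down exactly that assumption. The pattern in isolation (the enum values below are made up, not the real ones from svn_fs.h and svn_repos.h):

    #include <assert.h>

    enum fs_action    { FS_START = 4,  FS_END, FS_START_REVPROP, FS_END_REVPROP };
    enum repos_action { REPOS_START = 11, REPOS_END, REPOS_START_REVPROP,
                        REPOS_END_REVPROP };

    static enum repos_action
    map_action(enum fs_action a)
    {
      /* Simple conversion works only inside the contiguous range. */
      assert(a >= FS_START && a <= FS_END_REVPROP);
      return (enum repos_action)(a + REPOS_START - FS_START);
    }
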

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/hooks.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/hooks.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/hooks.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/hooks.c Wed Jun  5 09:22:43 2013
@@ -246,7 +246,7 @@ run_hook_cmd(svn_string_t **result,
         hook_env = svn_hash_gets(hooks_env,
                                  SVN_REPOS__HOOKS_ENV_DEFAULT_SECTION);
     }
-    
+
   err = svn_io_start_cmd3(&cmd_proc, ".", cmd, args,
                           env_from_env_hash(hook_env, pool, pool),
                           FALSE, FALSE, stdin_handle, result != NULL,
@@ -376,7 +376,7 @@ parse_hooks_env_option(const char *name,
   struct parse_hooks_env_option_baton *bo = baton;
   apr_pool_t *result_pool = apr_hash_pool_get(bo->hooks_env);
   apr_hash_t *hook_env;
-  
+
   hook_env = svn_hash_gets(bo->hooks_env, bo->section);
   if (hook_env == NULL)
     {
@@ -405,7 +405,7 @@ parse_hooks_env_section(const char *name
   bo.section = name;
   bo.hooks_env = b->hooks_env;
 
-  svn_config_enumerate2(b->cfg, name, parse_hooks_env_option, &bo, pool);
+  (void)svn_config_enumerate2(b->cfg, name, parse_hooks_env_option, &bo, pool);
 
   return TRUE;
 }
@@ -421,11 +421,12 @@ svn_repos__parse_hooks_env(apr_hash_t **
 
   if (local_abspath)
     {
-      SVN_ERR(svn_config_read2(&cfg, local_abspath, FALSE, TRUE, scratch_pool));
+      SVN_ERR(svn_config_read3(&cfg, local_abspath, FALSE,
+                               TRUE, TRUE, scratch_pool));
       b.cfg = cfg;
       b.hooks_env = apr_hash_make(result_pool);
-      svn_config_enumerate_sections2(cfg, parse_hooks_env_section, &b,
-                                     scratch_pool);
+      (void)svn_config_enumerate_sections2(cfg, parse_hooks_env_section, &b,
+                                           scratch_pool);
       *hooks_env_p = b.hooks_env;
     }
   else

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/log.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/log.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/log.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/log.c Wed Jun  5 09:22:43 2013
@@ -210,19 +210,16 @@ detect_changed(apr_hash_t **changed,
       /* NOTE:  Much of this loop is going to look quite similar to
          svn_repos_check_revision_access(), but we have to do more things
          here, so we'll live with the duplication. */
-      const void *key;
-      void *val;
       svn_fs_path_change2_t *change;
       const char *path;
+      apr_ssize_t path_len;
       char action;
       svn_log_changed_path2_t *item;
 
       svn_pool_clear(subpool);
 
       /* KEY will be the path, VAL the change. */
-      apr_hash_this(hi, &key, NULL, &val);
-      path = (const char *) key;
-      change = val;
+      apr_hash_this(hi, (const void **)&path, &path_len, (void **)&change);
 
       /* Skip path if unreadable. */
       if (authz_read_func)
@@ -311,11 +308,15 @@ detect_changed(apr_hash_t **changed,
 
       if ((action == 'A') || (action == 'R'))
         {
-          const char *copyfrom_path;
-          svn_revnum_t copyfrom_rev;
+          const char *copyfrom_path = change->copyfrom_path;
+          svn_revnum_t copyfrom_rev = change->copyfrom_rev;
 
-          SVN_ERR(svn_fs_copied_from(&copyfrom_rev, &copyfrom_path,
-                                     root, path, subpool));
+          /* the following is a potentially expensive operation since on FSFS
+             we will follow the DAG from ROOT to PATH and that requires
+             actually reading the directories along the way. */
+          if (!change->copyfrom_known)
+            SVN_ERR(svn_fs_copied_from(&copyfrom_rev, &copyfrom_path,
+                                      root, path, subpool));
 
           if (copyfrom_path && SVN_IS_VALID_REVNUM(copyfrom_rev))
             {
@@ -341,7 +342,9 @@ detect_changed(apr_hash_t **changed,
                 }
             }
         }
-      svn_hash_sets(*changed, apr_pstrdup(pool, path), item);
+
+      apr_hash_set(*changed, apr_pstrmemdup(pool, path, path_len), path_len,
+                   item);
     }
 
   svn_pool_destroy(subpool);
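
Fetching the key, its length, and the value in a single apr_hash_this() call lets the loop above reuse the known length for apr_pstrmemdup() and apr_hash_set(), avoiding the strlen() hidden inside svn_hash_sets()/apr_pstrdup(). The same pattern as a standalone helper (illustrative only, not part of this commit):

    #include <apr_hash.h>
    #include <apr_strings.h>

    /* Copy every entry of SRC into DST, duplicating keys into POOL,
     * without ever calling strlen() on a key. */
    static void
    copy_entries(apr_hash_t *dst, apr_hash_t *src, apr_pool_t *pool)
    {
      apr_hash_index_t *hi;

      for (hi = apr_hash_first(pool, src); hi; hi = apr_hash_next(hi))
        {
          const char *key;
          apr_ssize_t klen;
          void *val;

          apr_hash_this(hi, (const void **)&key, &klen, &val);
          apr_hash_set(dst, apr_pstrmemdup(pool, key, klen), klen, val);
        }
      }
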
@@ -599,7 +602,7 @@ fs_mergeinfo_changed(svn_mergeinfo_catal
      svn:mergeinfo change and is one of our paths of interest, or a
      child or [grand]parent directory thereof. */
   iterpool = svn_pool_create(scratch_pool);
-  for (hi = apr_hash_first(scratch_pool, *prefetched_changes); 
+  for (hi = apr_hash_first(scratch_pool, *prefetched_changes);
        hi;
        hi = apr_hash_next(hi))
     {
@@ -834,6 +837,11 @@ get_combined_mergeinfo_changes(svn_merge
         }
     }
 
+  /* In most revisions, there will be no mergeinfo change at all. */
+  if (   apr_hash_count(deleted_mergeinfo_catalog) == 0
+      && apr_hash_count(added_mergeinfo_catalog) == 0)
+    return SVN_NO_ERROR;
+  
   /* Check our PATHS for any changes to their inherited mergeinfo.
      (We deal with changes to mergeinfo directly *on* the paths in the
      following loop.)  */
@@ -1087,21 +1095,47 @@ fill_log_entry(svn_log_entry_t *log_entr
         }
       else
         {
-          /* Requested only some revprops... */
           int i;
-          for (i = 0; i < revprops->nelts; i++)
+
+          /* Requested only some revprops... */
+          
+          /* often only the standard revprops got requested and delivered.
+             In that case, we can simply pass the hash on. */
+          if (revprops->nelts == apr_hash_count(r_props) && !censor_revprops)
             {
-              char *name = APR_ARRAY_IDX(revprops, i, char *);
-              svn_string_t *value = svn_hash_gets(r_props, name);
-              if (censor_revprops
-                  && !(strcmp(name, SVN_PROP_REVISION_AUTHOR) == 0
-                       || strcmp(name, SVN_PROP_REVISION_DATE) == 0))
-                /* ... but we can only return author/date. */
-                continue;
-              if (log_entry->revprops == NULL)
-                log_entry->revprops = svn_hash__make(pool);
-              svn_hash_sets(log_entry->revprops, name, value);
+              log_entry->revprops = r_props;
+              for (i = 0; i < revprops->nelts; i++)
+                {
+                  const svn_string_t *name
+                    = APR_ARRAY_IDX(revprops, i, const svn_string_t *);
+                  if (!apr_hash_get(r_props, name->data, name->len))
+                    {
+                      /* hash does not match list of revprops we want */
+                      log_entry->revprops = NULL;
+                      break;
+                    }
+                }
             }
+
+          /* slow, revprop-by-revprop filtering */
+          if (log_entry->revprops == NULL)
+            for (i = 0; i < revprops->nelts; i++)
+              {
+                const svn_string_t *name
+                  = APR_ARRAY_IDX(revprops, i, const svn_string_t *);
+                svn_string_t *value
+                  = apr_hash_get(r_props, name->data, name->len);
+                if (censor_revprops
+                    && !(strncmp(name->data, SVN_PROP_REVISION_AUTHOR,
+                                 name->len) == 0
+                         || strncmp(name->data, SVN_PROP_REVISION_DATE,
+                                    name->len) == 0))
+                  /* ... but we can only return author/date. */
+                  continue;
+                if (log_entry->revprops == NULL)
+                  log_entry->revprops = svn_hash__make(pool);
+                apr_hash_set(log_entry->revprops, name->data, name->len, value);
+              }
         }
     }
 
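The new fast path hands the delivered hash straight to the caller when it provably equals the requested set: the element-count comparison plus the membership loop together establish set equality, assuming the caller lists each revprop name at most once (a duplicate name could let a hash with a different extra key slip through) and nothing needs censoring. Reduced to its core (a sketch; HASH, WANTED and RESULT stand in for r_props, revprops and log_entry->revprops):

    /* Reuse HASH verbatim iff it contains exactly the WANTED keys. */
    if (wanted->nelts == apr_hash_count(hash))
      {
        int i;

        result = hash;
        for (i = 0; i < wanted->nelts; i++)
          {
            const svn_string_t *name
              = APR_ARRAY_IDX(wanted, i, const svn_string_t *);
            if (!apr_hash_get(hash, name->data, name->len))
              {
                result = NULL;        /* mismatch: fall back to filtering */
                break;
              }
          }
      }
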
@@ -1175,32 +1209,33 @@ send_log(svn_revnum_t rev,
       && apr_hash_count(log_target_history_as_mergeinfo))
     {
       apr_hash_index_t *hi;
-      apr_pool_t *subpool = svn_pool_create(pool);
+      apr_pool_t *iterpool = svn_pool_create(pool);
 
       /* REV was merged in, but it might already be part of the log target's
          natural history, so change our starting assumption. */
       found_rev_of_interest = FALSE;
 
       /* Look at each changed path in REV. */
-      for (hi = apr_hash_first(subpool, log_entry->changed_paths2);
+      for (hi = apr_hash_first(pool, log_entry->changed_paths2);
            hi;
            hi = apr_hash_next(hi))
         {
           svn_boolean_t path_is_in_history = FALSE;
           const char *changed_path = svn__apr_hash_index_key(hi);
           apr_hash_index_t *hi2;
-          apr_pool_t *inner_subpool = svn_pool_create(subpool);
+
+          apr_hash_this(hi, (const void**)&changed_path, NULL, NULL);
 
           /* Look at each path on the log target's mergeinfo. */
-          for (hi2 = apr_hash_first(inner_subpool,
+          for (hi2 = apr_hash_first(iterpool,
                                     log_target_history_as_mergeinfo);
                hi2;
                hi2 = apr_hash_next(hi2))
             {
-              const char *mergeinfo_path =
-                svn__apr_hash_index_key(hi2);
-              svn_rangelist_t *rangelist =
-                svn__apr_hash_index_val(hi2);
+              const char *mergeinfo_path;
+              svn_rangelist_t *rangelist;
+              apr_hash_this(hi2, (const void**)&mergeinfo_path, NULL,
+                                 (void **)&rangelist);
 
               /* Check whether CHANGED_PATH at revision REV is a child of
                  a (path, revision) tuple in LOG_TARGET_HISTORY_AS_MERGEINFO. */
@@ -1223,7 +1258,7 @@ send_log(svn_revnum_t rev,
               if (path_is_in_history)
                 break;
             }
-          svn_pool_destroy(inner_subpool);
+          svn_pool_clear(iterpool);
 
           if (!path_is_in_history)
             {
@@ -1234,7 +1269,7 @@ send_log(svn_revnum_t rev,
               break;
             }
         }
-      svn_pool_destroy(subpool);
+      svn_pool_destroy(iterpool);
     }
 
   /* If we only got changed paths the sake of detecting redundant merged
@@ -2243,6 +2278,19 @@ svn_repos_get_logs4(svn_repos_t *repos,
   svn_boolean_t descending_order;
   svn_mergeinfo_t paths_history_mergeinfo = NULL;
 
+  if (revprops)
+    {
+      int i;
+      apr_array_header_t *new_revprops
+        = apr_array_make(pool, revprops->nelts, sizeof(svn_string_t *));
+
+      for (i = 0; i < revprops->nelts; ++i)
+        APR_ARRAY_PUSH(new_revprops, svn_string_t *)
+          = svn_string_create(APR_ARRAY_IDX(revprops, i, const char *), pool);
+
+      revprops = new_revprops;
+    }
+  
   /* Setup log range. */
   SVN_ERR(svn_fs_youngest_rev(&head, fs, pool));
 

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/replay.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/replay.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/replay.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/replay.c Wed Jun  5 09:22:43 2013
@@ -704,31 +704,45 @@ path_driver_cb_func(void **dir_baton,
       /* Handle property modifications. */
       if (change->prop_mod || downgraded_copy)
         {
-          apr_array_header_t *prop_diffs;
-          apr_hash_t *old_props;
-          apr_hash_t *new_props;
-          int i;
-
-          if (source_root)
-            SVN_ERR(svn_fs_node_proplist(&old_props, source_root,
-                                         source_fspath, pool));
-          else
-            old_props = apr_hash_make(pool);
+          if (cb->compare_root)
+            {
+              apr_array_header_t *prop_diffs;
+              apr_hash_t *old_props;
+              apr_hash_t *new_props;
+              int i;
+
+              if (source_root)
+                SVN_ERR(svn_fs_node_proplist(&old_props, source_root,
+                                             source_fspath, pool));
+              else
+                old_props = apr_hash_make(pool);
 
-          SVN_ERR(svn_fs_node_proplist(&new_props, root, edit_path, pool));
+              SVN_ERR(svn_fs_node_proplist(&new_props, root, edit_path, pool));
 
-          SVN_ERR(svn_prop_diffs(&prop_diffs, new_props, old_props,
-                                 pool));
+              SVN_ERR(svn_prop_diffs(&prop_diffs, new_props, old_props,
+                                     pool));
 
-          for (i = 0; i < prop_diffs->nelts; ++i)
+              for (i = 0; i < prop_diffs->nelts; ++i)
+                {
+                  svn_prop_t *pc = &APR_ARRAY_IDX(prop_diffs, i, svn_prop_t);
+                   if (change->node_kind == svn_node_dir)
+                     SVN_ERR(editor->change_dir_prop(*dir_baton, pc->name,
+                                                     pc->value, pool));
+                   else if (change->node_kind == svn_node_file)
+                     SVN_ERR(editor->change_file_prop(file_baton, pc->name,
+                                                      pc->value, pool));
+                }
+            }
+          else
             {
-              svn_prop_t *pc = &APR_ARRAY_IDX(prop_diffs, i, svn_prop_t);
-               if (change->node_kind == svn_node_dir)
-                 SVN_ERR(editor->change_dir_prop(*dir_baton, pc->name,
-                                                 pc->value, pool));
-               else if (change->node_kind == svn_node_file)
-                 SVN_ERR(editor->change_file_prop(file_baton, pc->name,
-                                                  pc->value, pool));
+              /* Just do a dummy prop change to signal that there are *any*
+                 propmods. */
+              if (change->node_kind == svn_node_dir)
+                SVN_ERR(editor->change_dir_prop(*dir_baton, "", NULL,
+                                                pool));
+              else if (change->node_kind == svn_node_file)
+                SVN_ERR(editor->change_file_prop(file_baton, "", NULL,
+                                                 pool));
             }
         }
 

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/reporter.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/reporter.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/reporter.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/reporter.c Wed Jun  5 09:22:43 2013
@@ -1140,12 +1140,9 @@ delta_dirs(report_baton_t *b, svn_revnum
            svn_boolean_t start_empty, svn_depth_t wc_depth,
            svn_depth_t requested_depth, apr_pool_t *pool)
 {
-  svn_fs_root_t *s_root;
   apr_hash_t *s_entries = NULL, *t_entries;
   apr_hash_index_t *hi;
   apr_pool_t *subpool;
-  const char *name, *s_fullpath, *t_fullpath, *e_fullpath;
-  path_info_t *info;
 
   /* Compare the property lists.  If we're starting empty, pass a NULL
      source path so that we add all the properties.
@@ -1160,6 +1157,8 @@ delta_dirs(report_baton_t *b, svn_revnum
       /* Get the list of entries in each of source and target. */
       if (s_path && !start_empty)
         {
+          svn_fs_root_t *s_root;
+
           SVN_ERR(get_source_root(b, &s_root, s_rev));
           SVN_ERR(svn_fs_dir_entries(&s_entries, s_root, s_path, pool));
         }
@@ -1170,6 +1169,8 @@ delta_dirs(report_baton_t *b, svn_revnum
 
       while (1)
         {
+          path_info_t *info;
+          const char *name, *s_fullpath, *t_fullpath, *e_fullpath;
           const svn_fs_dirent_t *s_entry, *t_entry;
 
           svn_pool_clear(subpool);
@@ -1197,10 +1198,9 @@ delta_dirs(report_baton_t *b, svn_revnum
           t_fullpath = svn_fspath__join(t_path, name, subpool);
           t_entry = svn_hash_gets(t_entries, name);
           s_fullpath = s_path ? svn_fspath__join(s_path, name, subpool) : NULL;
-          s_entry = s_entries ?
-            svn_hash_gets(s_entries, name) : NULL;
+          s_entry = s_entries ? svn_hash_gets(s_entries, name) : NULL;
 
-          /* The only special cases here are
+          /* The only special cases where we don't process the entry are
 
              - When requested_depth is files but the reported path is
              a directory.  This is technically a client error, but we
@@ -1208,10 +1208,10 @@ delta_dirs(report_baton_t *b, svn_revnum
 
              - When the reported depth is svn_depth_exclude.
           */
-          if ((! info || info->depth != svn_depth_exclude)
-              && (requested_depth != svn_depth_files
-                  || ((! t_entry || t_entry->kind != svn_node_dir)
-                      && (! s_entry || s_entry->kind != svn_node_dir))))
+          if (! ((requested_depth == svn_depth_files
+                  && ((t_entry && t_entry->kind == svn_node_dir)
+                      || (s_entry && s_entry->kind == svn_node_dir)))
+                 || (info && info->depth == svn_depth_exclude)))
             SVN_ERR(update_entry(b, s_rev, s_fullpath, s_entry, t_fullpath,
                                  t_entry, dir_baton, e_fullpath, info,
                                  info ? info->depth
@@ -1240,13 +1240,13 @@ delta_dirs(report_baton_t *b, svn_revnum
                hi;
                hi = apr_hash_next(hi))
             {
-              const svn_fs_dirent_t *s_entry;
+              const svn_fs_dirent_t *s_entry = svn__apr_hash_index_val(hi);
 
               svn_pool_clear(subpool);
-              s_entry = svn__apr_hash_index_val(hi);
 
               if (svn_hash_gets(t_entries, s_entry->name) == NULL)
                 {
+                  const char *e_fullpath;
                   svn_revnum_t deleted_rev;
 
                   if (s_entry->kind == svn_node_file
@@ -1277,10 +1277,11 @@ delta_dirs(report_baton_t *b, svn_revnum
       /* Loop over the dirents in the target. */
       for (hi = apr_hash_first(pool, t_entries); hi; hi = apr_hash_next(hi))
         {
-          const svn_fs_dirent_t *s_entry, *t_entry;
+          const svn_fs_dirent_t *t_entry = svn__apr_hash_index_val(hi);
+          const svn_fs_dirent_t *s_entry;
+          const char *s_fullpath, *t_fullpath, *e_fullpath;
 
           svn_pool_clear(subpool);
-          t_entry = svn__apr_hash_index_val(hi);
 
           if (is_depth_upgrade(wc_depth, requested_depth, t_entry->kind))
             {
@@ -1301,11 +1302,9 @@ delta_dirs(report_baton_t *b, svn_revnum
                       || requested_depth == svn_depth_files))
                 continue;
 
-              /* Look for an entry with the same name
-                 in the source dirents. */
+              /* Look for an entry with the same name in the source dirents. */
               s_entry = s_entries ?
-                  svn_hash_gets(s_entries, t_entry->name)
-                  : NULL;
+                  svn_hash_gets(s_entries, t_entry->name) : NULL;
               s_fullpath = s_entry ?
                   svn_fspath__join(s_path, t_entry->name, subpool) : NULL;
             }
@@ -1321,8 +1320,6 @@ delta_dirs(report_baton_t *b, svn_revnum
                                subpool));
         }
 
-
-      /* Destroy iteration subpool. */
       svn_pool_destroy(subpool);
     }
   return SVN_NO_ERROR;

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.c Wed Jun  5 09:22:43 2013
@@ -1811,8 +1811,8 @@ svn_repos_recover4(const char *path,
 struct freeze_baton_t {
   apr_array_header_t *paths;
   int counter;
-  svn_repos_freeze_func_t freeze_body;
-  void *baton;
+  svn_repos_freeze_func_t freeze_func;
+  void *freeze_baton;
 };
 
 static svn_error_t *
@@ -1823,7 +1823,7 @@ multi_freeze(void *baton,
 
   if (fb->counter == fb->paths->nelts)
     {
-      SVN_ERR(fb->freeze_body(fb->baton, pool));
+      SVN_ERR(fb->freeze_func(fb->freeze_baton, pool));
       return SVN_NO_ERROR;
     }
   else
@@ -1872,16 +1872,16 @@ multi_freeze(void *baton,
    while frozen. */
 svn_error_t *
 svn_repos_freeze(apr_array_header_t *paths,
-                 svn_repos_freeze_func_t freeze_body,
-                 void *baton,
+                 svn_repos_freeze_func_t freeze_func,
+                 void *freeze_baton,
                  apr_pool_t *pool)
 {
   struct freeze_baton_t fb;
 
   fb.paths = paths;
   fb.counter = 0;
-  fb.freeze_body = freeze_body;
-  fb.baton = baton;
+  fb.freeze_func = freeze_func;
+  fb.freeze_baton = freeze_baton;
 
   SVN_ERR(multi_freeze(&fb, pool));
 

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.h
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.h?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.h (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/repos.h Wed Jun  5 09:22:43 2013
@@ -370,7 +370,7 @@ svn_repos__hooks_post_unlock(svn_repos_t
    PATH and GROUPS_PATH may be a dirent or a registry path and iff ACCEPT_URLS
    is set it may also be an absolute file url.
 
-   If PATH or GROUPS_PATH is not a valid authz rule file, then return 
+   If PATH or GROUPS_PATH is not a valid authz rule file, then return
    SVN_AUTHZ_INVALID_CONFIG.  The contents of *AUTHZ_P is then
    undefined.  If MUST_EXIST is TRUE, a missing authz or global groups file
    is also an error. */

Modified: subversion/branches/verify-keep-going/subversion/libsvn_repos/rev_hunt.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_repos/rev_hunt.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_repos/rev_hunt.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_repos/rev_hunt.c Wed Jun  5 09:22:43 2013
@@ -171,12 +171,8 @@ svn_repos_get_committed_info(svn_revnum_
   SVN_ERR(svn_fs_revision_proplist(&revprops, fs, *committed_rev, pool));
 
   /* Extract date and author from these revprops. */
-  committed_date_s = apr_hash_get(revprops,
-                                  SVN_PROP_REVISION_DATE,
-                                  sizeof(SVN_PROP_REVISION_DATE)-1);
-  last_author_s = apr_hash_get(revprops,
-                               SVN_PROP_REVISION_AUTHOR,
-                               sizeof(SVN_PROP_REVISION_AUTHOR)-1);
+  committed_date_s = svn_hash_gets(revprops, SVN_PROP_REVISION_DATE);
+  last_author_s = svn_hash_gets(revprops, SVN_PROP_REVISION_AUTHOR);
 
   *committed_date = committed_date_s ? committed_date_s->data : NULL;
   *last_author = last_author_s ? last_author_s->data : NULL;
@@ -1491,7 +1487,7 @@ get_file_revs_backwards(svn_repos_t *rep
   SVN_ERR(svn_fs_revision_root(&root, repos->fs, end, scratch_pool));
   SVN_ERR(svn_fs_check_path(&kind, root, path, scratch_pool));
   if (kind != svn_node_file)
-    return svn_error_createf(SVN_ERR_FS_NOT_FILE, 
+    return svn_error_createf(SVN_ERR_FS_NOT_FILE,
                              NULL, _("'%s' is not a file in revision %ld"),
                              path, end);
 

Propchange: subversion/branches/verify-keep-going/subversion/libsvn_subr/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Wed Jun  5 09:22:43 2013
@@ -9,3 +9,4 @@ Debug
 .*~
 libsvn_subr.def
 internal_statements.h
+errorcode.inc

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/adler32.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/adler32.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/adler32.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/adler32.c Wed Jun  5 09:22:43 2013
@@ -57,7 +57,7 @@ svn__adler32(apr_uint32_t checksum, cons
    */
   if (len >= 80)
     {
-      /* Larger buffers can be effiently handled by Marc Adler's
+      /* Larger buffers can be efficiently handled by Marc Adler's
        * optimized code. Also, new zlib versions will come with
        * SIMD code for x86 and x64.
        */
@@ -76,16 +76,16 @@ svn__adler32(apr_uint32_t checksum, cons
        * (approx. one clock tick per byte + 2 ticks loop overhead)
        */
       for (; len >= 8; len -= 8, input += 8)
-      {
-        s1 += input[0]; s2 += s1;
-        s1 += input[1]; s2 += s1;
-        s1 += input[2]; s2 += s1;
-        s1 += input[3]; s2 += s1;
-        s1 += input[4]; s2 += s1;
-        s1 += input[5]; s2 += s1;
-        s1 += input[6]; s2 += s1;
-        s1 += input[7]; s2 += s1;
-      }
+        {
+          s1 += input[0]; s2 += s1;
+          s1 += input[1]; s2 += s1;
+          s1 += input[2]; s2 += s1;
+          s1 += input[3]; s2 += s1;
+          s1 += input[4]; s2 += s1;
+          s1 += input[5]; s2 += s1;
+          s1 += input[6]; s2 += s1;
+          s1 += input[7]; s2 += s1;
+        }
 
       /* Adler-32 calculation as a simple two ticks per iteration loop.
        */

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.c Wed Jun  5 09:22:43 2013
@@ -35,6 +35,7 @@
 #include "svn_private_config.h"
 #include "svn_dso.h"
 #include "svn_version.h"
+#include "private/svn_dep_compat.h"
 
 #include "auth.h"
 
@@ -186,6 +187,15 @@ svn_auth_get_parameter(svn_auth_baton_t 
 }
 
 
+/* Return the key used to address the in-memory cache of auth
+   credentials of type CRED_KIND and associated with REALMSTRING. */
+static const char *
+make_cache_key(const char *cred_kind,
+               const char *realmstring,
+               apr_pool_t *pool)
+{
+  return apr_pstrcat(pool, cred_kind, ":", realmstring, (char *)NULL);
+}
 
 svn_error_t *
 svn_auth_first_credentials(void **credentials,
@@ -212,7 +222,7 @@ svn_auth_first_credentials(void **creden
                              cred_kind);
 
   /* First, see if we have cached creds in the auth_baton. */
-  cache_key = apr_pstrcat(pool, cred_kind, ":", realmstring, (char *)NULL);
+  cache_key = make_cache_key(cred_kind, realmstring, pool);
   creds = svn_hash_gets(auth_baton->creds_cache, cache_key);
   if (creds)
     {
@@ -241,7 +251,9 @@ svn_auth_first_credentials(void **creden
     }
 
   if (! creds)
-    *state = NULL;
+    {
+      *state = NULL;
+    }
   else
     {
       /* Build an abstract iteration state. */
@@ -295,10 +307,12 @@ svn_auth_next_credentials(void **credent
         }
       else if (provider->vtable->next_credentials)
         {
-          SVN_ERR(provider->vtable->next_credentials(
-                      &creds, state->provider_iter_baton,
-                      provider->provider_baton, auth_baton->parameters,
-                      state->realmstring, auth_baton->pool));
+          SVN_ERR(provider->vtable->next_credentials(&creds,
+                                                     state->provider_iter_baton,
+                                                     provider->provider_baton,
+                                                     auth_baton->parameters,
+                                                     state->realmstring,
+                                                     auth_baton->pool));
         }
 
       if (creds != NULL)
@@ -364,12 +378,11 @@ svn_auth_save_credentials(svn_auth_iters
       provider = APR_ARRAY_IDX(state->table->providers, i,
                                svn_auth_provider_object_t *);
       if (provider->vtable->save_credentials)
-        SVN_ERR(provider->vtable->save_credentials
-                (&save_succeeded, creds,
-                 provider->provider_baton,
-                 auth_baton->parameters,
-                 state->realmstring,
-                 pool));
+        SVN_ERR(provider->vtable->save_credentials(&save_succeeded, creds,
+                                                   provider->provider_baton,
+                                                   auth_baton->parameters,
+                                                   state->realmstring,
+                                                   pool));
 
       if (save_succeeded)
         break;
@@ -381,6 +394,32 @@ svn_auth_save_credentials(svn_auth_iters
   return SVN_NO_ERROR;
 }
 
+
+svn_error_t *
+svn_auth_forget_credentials(svn_auth_baton_t *auth_baton,
+                            const char *cred_kind,
+                            const char *realmstring,
+                            apr_pool_t *scratch_pool)
+{
+  SVN_ERR_ASSERT((cred_kind && realmstring) || (!cred_kind && !realmstring));
+
+  /* If we have a CRED_KIND and REALMSTRING, we clear out just the
+     cached item (if any).  Otherwise, empty the whole hash. */
+  if (cred_kind)
+    {
+      svn_hash_sets(auth_baton->creds_cache,
+                    make_cache_key(cred_kind, realmstring, scratch_pool),
+                    NULL);
+    }
+  else
+    {
+      apr_hash_clear(auth_baton->creds_cache);
+    }
+
+  return SVN_NO_ERROR;
+}
+
+
 svn_auth_ssl_server_cert_info_t *
 svn_auth_ssl_server_cert_info_dup
   (const svn_auth_ssl_server_cert_info_t *info, apr_pool_t *pool)
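
Since cached credentials are addressed by "<cred_kind>:<realmstring>" keys (see make_cache_key() above), the new svn_auth_forget_credentials() can evict either one cached credential or the whole in-memory cache. A hypothetical caller (the baton, realm string, and pool are assumptions):

    /* Drop one cached simple credential, e.g. after the server rejected it,
     * so the next svn_auth_first_credentials() round asks the providers again. */
    SVN_ERR(svn_auth_forget_credentials(auth_baton, SVN_AUTH_CRED_SIMPLE,
                                        "<https://svn.example.com:443> Example",
                                        scratch_pool));

    /* Or forget everything this baton has cached in memory. */
    SVN_ERR(svn_auth_forget_credentials(auth_baton, NULL, NULL, scratch_pool));
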
@@ -614,29 +653,3 @@ svn_auth_get_platform_specific_client_pr
 
   return SVN_NO_ERROR;
 }
-
-svn_error_t *
-svn_auth_cleanup_walk(svn_auth_baton_t *baton,
-                      svn_auth_cleanup_func_t cleanup_func,
-                      void *cleanup_baton,
-                      apr_pool_t *scratch_pool)
-{
-  /* ### FIXME: svn_auth__simple_cleanup_walk() oversteps its reach,
-     ### so this test for an SVN_AUTH_CRED_SIMPLE provider is
-     ### pointless.  Why not allow users to pass in an auth baton with
-     ### no registered providers -- after all, there's nothing
-     ### provider-centric about any of the existing plumbing.  That
-     ### plumbing is just a glorified wrapper around a bunch of shell
-     ### commands exercised on the ~/.subversion/auth tree.  --
-     ### cmpilato
-  */
-
-  if (svn_hash_gets(baton->tables, SVN_AUTH_CRED_SIMPLE))
-    {
-      SVN_ERR(svn_auth__simple_cleanup_walk(baton, cleanup_func, cleanup_baton,
-                                            baton->creds_cache, scratch_pool));
-    }
-  /* ### Maybe add support for other providers? */
-
-  return SVN_NO_ERROR;
-}

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.h
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.h?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.h (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/auth.h Wed Jun  5 09:22:43 2013
@@ -41,15 +41,6 @@ svn_auth__file_path(const char **path,
                     const char *config_dir,
                     apr_pool_t *pool);
 
-/* Implementation of svn_auth_cleanup_walk() for the "simple" provider */
-svn_error_t *
-svn_auth__simple_cleanup_walk(svn_auth_baton_t *baton,
-                              svn_auth_cleanup_func_t cleanup_func,
-                              void *cleanup_baton,
-                              apr_hash_t *creds_cache,
-                              apr_pool_t *scratch_pool);
-
-
 
 #ifdef __cplusplus
 }

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-inprocess.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-inprocess.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-inprocess.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-inprocess.c Wed Jun  5 09:22:43 2013
@@ -190,7 +190,6 @@ inprocess_cache_get_internal(char **buff
 {
   struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
 
-  *buffer = NULL;
   if (entry)
     {
       SVN_ERR(move_page_to_front(cache, entry->page));
@@ -201,6 +200,11 @@ inprocess_cache_get_internal(char **buff
 
       *size = entry->size;
     }
+  else
+    {
+      *buffer = NULL;
+      *size = 0;
+    }
 
   return SVN_NO_ERROR;
 }
@@ -213,25 +217,33 @@ inprocess_cache_get(void **value_p,
                     apr_pool_t *result_pool)
 {
   inprocess_cache_t *cache = cache_void;
-  char* buffer = NULL;
-  apr_size_t size;
 
   if (key)
-    SVN_MUTEX__WITH_LOCK(cache->mutex,
-                         inprocess_cache_get_internal(&buffer,
-                                                      &size,
-                                                      cache,
-                                                      key,
-                                                      result_pool));
+    {
+      char* buffer;
+      apr_size_t size;
 
-  /* deserialize the buffer content. Usually, this will directly
-     modify the buffer content directly.
-   */
-  *value_p = NULL;
-  *found = buffer != NULL;
-  return buffer && size
-    ? cache->deserialize_func(value_p, buffer, size, result_pool)
-    : SVN_NO_ERROR;
+      SVN_MUTEX__WITH_LOCK(cache->mutex,
+                           inprocess_cache_get_internal(&buffer,
+                                                        &size,
+                                                        cache,
+                                                        key,
+                                                        result_pool));
+      /* deserialize the buffer content. Usually, this will modify
+         the buffer content directly. */
+      *found = (buffer != NULL);
+      if (!buffer || !size)
+        *value_p = NULL;
+      else
+        return cache->deserialize_func(value_p, buffer, size, result_pool);
+    }
+  else
+    {
+      *value_p = NULL;
+      *found = FALSE;
+    }
+
+  return SVN_NO_ERROR;
 }
 
 /* Removes PAGE from the LRU list, removes all of its entries from
@@ -642,6 +654,7 @@ svn_cache__create_inprocess(svn_cache__t
 
   wrapper->vtable = &inprocess_cache_vtable;
   wrapper->cache_internal = cache;
+  wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");
 
   *cache_p = wrapper;
   return SVN_NO_ERROR;

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-membuffer.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-membuffer.c Wed Jun  5 09:22:43 2013
@@ -45,6 +45,8 @@
  *
  * 1. A linear data buffer containing cached items in a serialized
  *    representation. There may be arbitrary gaps between entries.
+ *    This buffer is sub-divided into (currently two) cache levels.
+ *
  * 2. A directory of cache entries. This is organized similar to CPU
  *    data caches: for every possible key, there is exactly one group
  *    of entries that may contain the header info for an item with
@@ -56,23 +58,30 @@
  * between different processes and / or to persist them on disk. These
  * out-of-process features have not been implemented, yet.
  *
+ * Superficially, cache levels are being used as usual: insertion happens
+ * into L1 and evictions will promote items to L2.  But their whole point
+ * is a different one.  L1 uses a circular buffer, i.e. we have perfect
+ * caching for the last N bytes where N is the size of L1.  L2 uses a more
+ * elaborate scheme based on priorities and hit counts as described below.
+ *
  * The data buffer usage information is implicitly given by the directory
  * entries. Every USED entry has a reference to the previous and the next
  * used dictionary entry and this double-linked list is ordered by the
  * offsets of their item data within the data buffer. So removing data,
  * for instance, is done simply by unlinking it from the chain, implicitly
  * marking the entry as well as the data buffer section previously
- * associated to it as unused.
+ * associated to it as unused.  First and last element of that chain are
+ * being referenced from the respective cache level.
  *
- * Insertion can occur at only one, sliding position. It is marked by its
- * offset in the data buffer plus the index of the first used entry at or
- * behind that position. If this gap is too small to accommodate the new
- * item, the insertion window is extended as described below. The new entry
- * will always be inserted at the bottom end of the window and since the
- * next used entry is known, properly sorted insertion is possible.
+ * Insertion can occur at only one, sliding position per cache level.  It is
+ * marked by its offset in the data buffer and the index of the first used
+ * entry at or behind that position.  If this gap is too small to accommodate
+ * the new item, the insertion window is extended as described below. The new
+ * entry will always be inserted at the bottom end of the window and since
+ * the next used entry is known, properly sorted insertion is possible.
  *
  * To make the cache perform robustly in a wide range of usage scenarios,
- * a randomized variant of LFU is used (see ensure_data_insertable for
+ * L2 uses a randomized variant of LFU (see ensure_data_insertable_l2 for
  * details). Every item holds a read hit counter and there is a global read
  * hit counter. The more hits an entry has in relation to the average, the
  * more it is likely to be kept using a rand()-based condition. The test is
@@ -86,10 +95,10 @@
  * they get not used for a while. Also, even a cache thrashing situation
  * about 50% of the content survives every 50% of the cache being re-written
  * with new entries. For details on the fine-tuning involved, see the
- * comments in ensure_data_insertable().
+ * comments in ensure_data_insertable_l2().
  *
  * To limit the entry size and management overhead, not the actual item keys
- * but only their MD5 checksums will not be stored. This is reasonably safe
+ * but only their MD5-based hashes will be stored. This is reasonably safe
  * to do since users have only limited control over the full keys, even if
  * these contain folder paths. So, it is very hard to deliberately construct
  * colliding keys. Random checksum collisions can be shown to be extremely
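
A toy model of the L1/L2 interplay sketched above (all names and sizes are made up; the real logic lives in ensure_data_insertable_l2() and friends): L1 is a plain FIFO ring with perfect recency, and an item evicted from it is promoted to L2 only if its hit count holds up against a randomized threshold scaled by the average hit count.

    #include <stdlib.h>

    #define L1_SIZE 4
    #define L2_SIZE 16

    typedef struct { int key; unsigned hits; } item_t;

    static item_t l1[L1_SIZE], l2[L2_SIZE];
    static unsigned l1_next, l2_next, total_hits, total_inserts;

    static void
    insert_item(int key)
    {
      item_t victim = l1[l1_next];      /* FIFO eviction: oldest L1 slot */

      if (victim.hits > 0)
        {
          /* crude running average over everything ever inserted */
          unsigned average = total_inserts ? total_hits / total_inserts : 1;

          /* Randomized LFU: the hotter the item relative to the average,
           * the likelier it survives into L2. */
          if (victim.hits > (unsigned)rand() % (2 * average + 1))
            l2[l2_next++ % L2_SIZE] = victim;     /* promote */
          /* else: the cold item is simply dropped */
        }

      l1[l1_next].key = key;
      l1[l1_next].hits = 1;
      l1_next = (l1_next + 1) % L1_SIZE;
      total_hits++;
      total_inserts++;
    }
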
@@ -313,7 +322,7 @@ static svn_error_t* assert_equal_tags(co
 /* A single dictionary entry. Since all entries will be allocated once
  * during cache creation, those entries might be either used or unused.
  * An entry is used if and only if it is contained in the doubly-linked
- * list of used entries.
+ * list of used entries per cache level.
  */
 typedef struct entry_t
 {
@@ -321,7 +330,8 @@ typedef struct entry_t
    */
   entry_key_t key;
 
-  /* The offset of the cached item's serialized data within the data buffer.
+  /* The offset of the cached item's serialized data within the cache's
+   * DATA buffer.
    */
   apr_uint64_t offset;
 
@@ -337,15 +347,15 @@ typedef struct entry_t
 
   /* Reference to the next used entry in the order defined by offset.
    * NO_INDEX indicates the end of the list; this entry must be referenced
-   * by the caches membuffer_cache_t.last member. NO_INDEX also implies
-   * that the data buffer is not used beyond offset+size.
+   * by the cache's cache_level_t.last member.  NO_INDEX also implies that
+   * the data buffer is not used beyond offset+size.
    * Only valid for used entries.
    */
   apr_uint32_t next;
 
   /* Reference to the previous used entry in the order defined by offset.
    * NO_INDEX indicates the end of the list; this entry must be referenced
-   * by the caches membuffer_cache_t.first member.
+   * by the cache's cache_level_t.first member.
    * Only valid for used entries.
    */
   apr_uint32_t previous;
@@ -368,28 +378,12 @@ typedef struct entry_group_t
   entry_t entries[GROUP_SIZE];
 } entry_group_t;
 
-/* The cache header structure.
+/* Per-cache level header structure.  Instances of this are members of
+ * svn_membuffer_t and will use non-overlapping sections of its DATA buffer.
+ * All offset values are global / absolute to that whole buffer.
  */
-struct svn_membuffer_t
+typedef struct cache_level_t
 {
-  /* Number of cache segments. Must be a power of 2.
-     Please note that this structure represents only one such segment
-     and that all segments must / will report the same values here. */
-  apr_uint32_t segment_count;
-
-  /* The dictionary, GROUP_SIZE * group_count entries long. Never NULL.
-   */
-  entry_group_t *directory;
-
-  /* Flag array with group_count / GROUP_INIT_GRANULARITY _bit_ elements.
-   * Allows for efficiently marking groups as "not initialized".
-   */
-  unsigned char *group_initialized;
-
-  /* Size of dictionary in groups. Must be > 0.
-   */
-  apr_uint32_t group_count;
-
   /* Reference to the first (defined by the order content in the data
    * buffer) dictionary entry used by any data item.
    * NO_INDEX for an empty cache.
@@ -410,18 +404,46 @@ struct svn_membuffer_t
   apr_uint32_t next;
 
 
-  /* Pointer to the data buffer, data_size bytes long. Never NULL.
+  /* First offset in the caches DATA buffer that belongs to this level.
    */
-  unsigned char *data;
+  apr_uint64_t start_offset;
 
-  /* Size of data buffer in bytes. Must be > 0.
+  /* Size of data buffer allocated to this level in bytes. Must be > 0.
    */
-  apr_uint64_t data_size;
+  apr_uint64_t size;
 
   /* Offset in the data buffer where the next insertion shall occur.
    */
   apr_uint64_t current_data;
 
+} cache_level_t;
+
+/* The cache header structure.
+ */
+struct svn_membuffer_t
+{
+  /* Number of cache segments. Must be a power of 2.
+     Please note that this structure represents only one such segment
+     and that all segments must / will report the same values here. */
+  apr_uint32_t segment_count;
+
+  /* The dictionary, GROUP_SIZE * group_count entries long. Never NULL.
+   */
+  entry_group_t *directory;
+
+  /* Flag array with group_count / GROUP_INIT_GRANULARITY _bit_ elements.
+   * Allows for efficiently marking groups as "not initialized".
+   */
+  unsigned char *group_initialized;
+
+  /* Size of dictionary in groups. Must be > 0.
+   */
+  apr_uint32_t group_count;
+
+  /* Pointer to the data buffer, data_size bytes long. Never NULL.
+   */
+  unsigned char *data;
+
   /* Total number of data buffer bytes in use. This is for statistics only.
    */
   apr_uint64_t data_used;
@@ -431,6 +453,24 @@ struct svn_membuffer_t
    */
   apr_uint64_t max_entry_size;
 
+  /* The cache levels, organized as sub-buffers.  Since entries in the
+   * DIRECTORY use offsets in DATA for addressing, a cache lookup does
+   * not need to know the cache level of a specific item.  Cache levels
+   * are only used to implement a hybrid insertion / eviction strategy.
+   */
+
+  /* First cache level, i.e. most insertions happen here.  Very large
+   * items might get inserted directly into L2.  L1 is a strict FIFO
+   * ring buffer that does not care about item priorities.  All evicted
+   * items get a chance to be promoted to L2.
+   */
+  cache_level_t l1;
+
+  /* Second cache level, i.e. data evicted from L1 will be added here
+   * if the item is "important" enough or the L2 insertion window is large
+   * enough.
+   */
+  cache_level_t l2;
 
   /* Number of used dictionary entries, i.e. number of cached items.
    * In conjunction with hit_count, this is used calculate the average
@@ -521,7 +561,7 @@ write_lock_cache(svn_membuffer_t *cache,
               status = APR_SUCCESS;
             }
         }
-    
+
       if (status)
         return svn_error_wrap_apr(status,
                                   _("Can't write-lock cache mutex"));
@@ -621,6 +661,96 @@ get_index(svn_membuffer_t *cache, entry_
        + (apr_uint32_t)(entry - cache->directory[group_index].entries);
 }
 
+/* Return the cache level of ENTRY in CACHE.
+ */
+static cache_level_t *
+get_cache_level(svn_membuffer_t *cache, entry_t *entry)
+{
+  return entry->offset < cache->l1.size ? &cache->l1
+                                        : &cache->l2;
+}
+
+/* Insert ENTRY to the chain of items that belong to LEVEL in CACHE.  IDX
+ * is ENTRY's item index and is only given for efficiency.  The insertion
+ * takes place just before LEVEL->NEXT.  *CACHE will not be modified.
+ */
+static void
+chain_entry(svn_membuffer_t *cache,
+            cache_level_t *level,
+            entry_t *entry,
+            apr_uint32_t idx)
+{
+  /* insert ENTRY before this item */
+  entry_t *next = level->next == NO_INDEX
+                ? NULL
+                : get_entry(cache, level->next);
+  assert(idx == get_index(cache, entry));
+
+  /* update entry chain
+   */
+  entry->next = level->next;
+  if (level->first == NO_INDEX)
+    {
+      /* insert as the first and only entry in the chain
+       */
+      entry->previous = NO_INDEX;
+      level->last = idx;
+      level->first = idx;
+    }
+  else if (next == NULL)
+    {
+      /* insert as the last entry in the chain.
+       * Note that it cannot also be at the beginning of the chain.
+       */
+      entry->previous = level->last;
+      get_entry(cache, level->last)->next = idx;
+      level->last = idx;
+    }
+  else
+    {
+      /* insert either at the start of a non-empty list or
+       * somewhere in the middle
+       */
+      entry->previous = next->previous;
+      next->previous = idx;
+
+      if (entry->previous != NO_INDEX)
+        get_entry(cache, entry->previous)->next = idx;
+      else
+        level->first = idx;
+    }
+}
+
+/* Remove ENTRY from the chain of items that belong to LEVEL in CACHE. IDX
+ * is ENTRY's item index and is only given for efficiency.  Please note
+ * that neither *CACHE nor *ENTRY will be modified.
+ */
+static void
+unchain_entry(svn_membuffer_t *cache,
+              cache_level_t *level,
+              entry_t *entry,
+              apr_uint32_t idx)
+{
+  assert(idx == get_index(cache, entry));
+
+  /* update the insertion window, if it currently starts at this entry
+   */
+  if (level->next == idx)
+    level->next = entry->next;
+
+  /* unlink it from the chain of used entries
+   */
+  if (entry->previous == NO_INDEX)
+    level->first = entry->next;
+  else
+    get_entry(cache, entry->previous)->next = entry->next;
+
+  if (entry->next == NO_INDEX)
+    level->last = entry->previous;
+  else
+    get_entry(cache, entry->next)->previous = entry->previous;
+}
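
chain_entry() and unchain_entry() implement a doubly-linked list over the
entry array, using 32-bit indexes with NO_INDEX standing in for NULL.  The
following standalone sketch reproduces the same linking discipline with a
plain array and simplified types (hypothetical code, not part of this
commit), round-tripping an append and a mid-chain removal:

#include <assert.h>
#include <stdint.h>

#define NO_INDEX ((uint32_t)-1)

/* Entries link to each other by array index instead of by pointer;
 * NO_INDEX plays the role of NULL.  The real code additionally keeps
 * the NEXT pointer of the insertion window up to date. */
typedef struct { uint32_t previous, next; } item_t;
typedef struct { uint32_t first, last; } chain_t;

static void
append(chain_t *chain, item_t *items, uint32_t idx)
{
  items[idx].previous = chain->last;
  items[idx].next = NO_INDEX;
  if (chain->last == NO_INDEX)
    chain->first = idx;
  else
    items[chain->last].next = idx;
  chain->last = idx;
}

static void
unchain(chain_t *chain, item_t *items, uint32_t idx)
{
  if (items[idx].previous == NO_INDEX)
    chain->first = items[idx].next;
  else
    items[items[idx].previous].next = items[idx].next;

  if (items[idx].next == NO_INDEX)
    chain->last = items[idx].previous;
  else
    items[items[idx].next].previous = items[idx].previous;
}

int main(void)
{
  item_t items[3];
  chain_t chain = { NO_INDEX, NO_INDEX };

  append(&chain, items, 0);
  append(&chain, items, 1);
  append(&chain, items, 2);
  unchain(&chain, items, 1);              /* remove from the middle */

  assert(chain.first == 0 && items[0].next == 2);
  assert(chain.last == 2 && items[2].previous == 0);
  return 0;
}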
+
 /* Remove the used ENTRY from the CACHE, i.e. make it "unused".
  * In contrast to insertion, removal is possible for any entry.
  */
@@ -633,6 +763,7 @@ drop_entry(svn_membuffer_t *cache, entry
   apr_uint32_t group_index = idx / GROUP_SIZE;
   entry_group_t *group = &cache->directory[group_index];
   apr_uint32_t last_in_group = group_index * GROUP_SIZE + group->used - 1;
+  cache_level_t *level = get_cache_level(cache, entry);
 
   /* Only valid to be called for used entries.
    */
@@ -646,39 +777,31 @@ drop_entry(svn_membuffer_t *cache, entry
 
   /* extend the insertion window, if the entry happens to border it
    */
-  if (idx == cache->next)
-    cache->next = entry->next;
+  if (idx == level->next)
+    level->next = entry->next;
   else
-    if (entry->next == cache->next)
+    if (entry->next == level->next)
       {
         /* insertion window starts right behind the entry to remove
          */
         if (entry->previous == NO_INDEX)
           {
             /* remove the first entry -> insertion may start at pos 0, now */
-            cache->current_data = 0;
+            level->current_data = level->start_offset;
           }
         else
           {
             /* insertion may start right behind the previous entry */
             entry_t *previous = get_entry(cache, entry->previous);
-            cache->current_data = ALIGN_VALUE(  previous->offset
+            level->current_data = ALIGN_VALUE(  previous->offset
                                               + previous->size);
           }
       }
 
   /* unlink it from the chain of used entries
    */
-  if (entry->previous == NO_INDEX)
-    cache->first = entry->next;
-  else
-    get_entry(cache, entry->previous)->next = entry->next;
-
-  if (entry->next == NO_INDEX)
-    cache->last = entry->previous;
-  else
-    get_entry(cache, entry->next)->previous = entry->previous;
-
+  unchain_entry(cache, level, entry, idx);
+
   /* Move last entry into hole (if the removed one is not the last used).
    * We need to do this since all used entries are at the beginning of
    * the group's entries array.
@@ -689,18 +812,22 @@ drop_entry(svn_membuffer_t *cache, entry
        */
       *entry = group->entries[group->used-1];
 
+      /* this ENTRY may belong to a different cache level than the entry
+       * we have just removed */
+      level = get_cache_level(cache, entry);
+
       /* update foreign links to new index
        */
-      if (last_in_group == cache->next)
-        cache->next = idx;
+      if (last_in_group == level->next)
+        level->next = idx;
 
       if (entry->previous == NO_INDEX)
-        cache->first = idx;
+        level->first = idx;
       else
         get_entry(cache, entry->previous)->next = idx;
-      
+
       if (entry->next == NO_INDEX)
-        cache->last = idx;
+        level->last = idx;
       else
         get_entry(cache, entry->next)->previous = idx;
     }
@@ -722,16 +849,14 @@ insert_entry(svn_membuffer_t *cache, ent
   apr_uint32_t idx = get_index(cache, entry);
   apr_uint32_t group_index = idx / GROUP_SIZE;
   entry_group_t *group = &cache->directory[group_index];
-  entry_t *next = cache->next == NO_INDEX
-                ? NULL
-                : get_entry(cache, cache->next);
+  cache_level_t *level = get_cache_level(cache, entry);
 
   /* The entry must start at the beginning of the insertion window.
    * It must also be the first unused entry in the group.
    */
-  assert(entry->offset == cache->current_data);
+  assert(entry->offset == level->current_data);
   assert(idx == group_index * GROUP_SIZE + group->used);
-  cache->current_data = ALIGN_VALUE(entry->offset + entry->size);
+  level->current_data = ALIGN_VALUE(entry->offset + entry->size);
 
   /* update usage counters
    */
@@ -742,42 +867,12 @@ insert_entry(svn_membuffer_t *cache, ent
 
   /* update entry chain
    */
-  entry->next = cache->next;
-  if (cache->first == NO_INDEX)
-    {
-      /* insert as the first entry and only in the chain
-       */
-      entry->previous = NO_INDEX;
-      cache->last = idx;
-      cache->first = idx;
-    }
-  else if (next == NULL)
-    {
-      /* insert as the last entry in the chain.
-       * Note that it cannot also be at the beginning of the chain.
-       */
-      entry->previous = cache->last;
-      get_entry(cache, cache->last)->next = idx;
-      cache->last = idx;
-    }
-  else
-    {
-      /* insert either at the start of a non-empty list or
-       * somewhere in the middle
-       */
-      entry->previous = next->previous;
-      next->previous = idx;
-
-      if (entry->previous != NO_INDEX)
-        get_entry(cache, entry->previous)->next = idx;
-      else
-        cache->first = idx;
-    }
+  chain_entry(cache, level, entry, idx);
 
   /* The current insertion position must never point outside our
    * data buffer.
    */
-  assert(cache->current_data <= cache->data_size);
+  assert(level->current_data <= level->start_offset + level->size);
 }
 
 /* Map a KEY of 16 bytes to the CACHE and group that shall contain the
@@ -788,10 +883,13 @@ get_group_index(svn_membuffer_t **cache,
                 entry_key_t key)
 {
   svn_membuffer_t *segment0 = *cache;
-  
-  /* select the cache segment to use. they have all the same group_count */
-  *cache = &segment0[key[0] & (segment0->segment_count -1)];
-  return key[1] % segment0->group_count;
+
+  /* select the cache segment to use. they all have the same group_count.
+   * Since KEY may not be well-distributed, pre-fold it to a smaller but
+   * "denser" range.  The divisors are primes larger than the largest
+   * counts. */
+  *cache = &segment0[(key[1] % 2809637ull) & (segment0->segment_count - 1)];
+  return (key[0] % 5030895599ull) % segment0->group_count;
 }
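
As an illustration of the two-step reduction (the prime divisors are the
ones used above; segment_count and group_count are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Two-step reduction: pre-fold the 64-bit key halves by large primes,
 * then reduce to the segment mask and the group count. */
int main(void)
{
  uint64_t key0 = 0x0123456789abcdefull;  /* arbitrary example key */
  uint64_t key1 = 0xfedcba9876543210ull;
  uint32_t segment_count = 16;            /* must be a power of 2 */
  uint32_t group_count = 1024;            /* hypothetical */

  uint32_t segment = (uint32_t)((key1 % 2809637ull) & (segment_count - 1));
  uint32_t group = (uint32_t)((key0 % 5030895599ull) % group_count);

  assert(segment < segment_count);
  assert(group < group_count);
  return 0;
}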
 
 /* Reduce the hit count of ENTRY and update the accumulated hit info
@@ -950,6 +1048,7 @@ static void
 move_entry(svn_membuffer_t *cache, entry_t *entry)
 {
   apr_size_t size = ALIGN_VALUE(entry->size);
+  cache_level_t *level = get_cache_level(cache, entry);
 
   /* This entry survived this cleansing run. Reset half of its
    * hit count so that its removal gets more likely in the next
@@ -963,41 +1062,75 @@ move_entry(svn_membuffer_t *cache, entry
    * Size-aligned moves tend to be faster than non-aligned ones
   * because no "odd" bytes at the end need special treatment.
    */
-  if (entry->offset != cache->current_data)
+  if (entry->offset != level->current_data)
     {
-      memmove(cache->data + cache->current_data,
+      memmove(cache->data + level->current_data,
               cache->data + entry->offset,
               size);
-      entry->offset = cache->current_data;
+      entry->offset = level->current_data;
     }
 
   /* The insertion position is now directly behind this entry.
    */
-  cache->current_data = entry->offset + size;
-  cache->next = entry->next;
+  level->current_data = entry->offset + size;
+  level->next = entry->next;
 
   /* The current insertion position must never point outside our
    * data buffer.
    */
-  assert(cache->current_data <= cache->data_size);
+  assert(level->current_data <= level->start_offset + level->size);
 }
 
-/* If necessary, enlarge the insertion window until it is at least
- * SIZE bytes long. SIZE must not exceed the data buffer size.
- * Return TRUE if enough room could be found or made. A FALSE result
+/* Move ENTRY in CACHE from L1 to L2.
+ */
+static void
+promote_entry(svn_membuffer_t *cache, entry_t *entry)
+{
+  apr_uint32_t idx = get_index(cache, entry);
+  apr_size_t size = ALIGN_VALUE(entry->size);
+  assert(get_cache_level(cache, entry) == &cache->l1);
+
+  /* copy item from the current location in L1 to the start of L2's
+   * insertion window */
+  memmove(cache->data + cache->l2.current_data,
+          cache->data + entry->offset,
+          size);
+  entry->offset = cache->l2.current_data;
+
+  /* The insertion position is now directly behind this entry.
+   */
+  cache->l2.current_data += size;
+
+  /* remove ENTRY from chain of L1 entries and put it into L2
+   */
+  unchain_entry(cache, &cache->l1, entry, idx);
+  chain_entry(cache, &cache->l2, entry, idx);
+}
+
+/* This function implements the cache insertion / eviction strategy for L2.
+ *
+ * If necessary, enlarge the insertion window of CACHE->L2 until it is at
+ * least TO_FIT_IN->SIZE bytes long. TO_FIT_IN->SIZE must not exceed the
+ * data buffer size allocated to CACHE->L2.  IDX is the item index of
+ * TO_FIT_IN and is given for performance reasons.
+ *
+ * Return TRUE if enough room could be found or made.  A FALSE result
  * indicates that the respective item shall not be added.
  */
 static svn_boolean_t
-ensure_data_insertable(svn_membuffer_t *cache, apr_size_t size)
+ensure_data_insertable_l2(svn_membuffer_t *cache,
+                          entry_t *to_fit_in,
+                          apr_uint32_t idx)
 {
   entry_t *entry;
   apr_uint64_t average_hit_value;
   apr_uint64_t threshold;
 
-  /* accumulated size of the entries that have been removed to make
-   * room for the new one.
-   */
-  apr_size_t drop_size = 0;
+  /* accumulated "worth" of items dropped so far */
+  apr_size_t drop_hits = 0;
+
+  /* verify parameters */
+  assert(idx == get_index(cache, to_fit_in));
 
   /* This loop will eventually terminate because every cache entry
    * would get dropped eventually:
@@ -1015,41 +1148,37 @@ ensure_data_insertable(svn_membuffer_t *
     {
       /* first offset behind the insertion window
        */
-      apr_uint64_t end = cache->next == NO_INDEX
-                       ? cache->data_size
-                       : get_entry(cache, cache->next)->offset;
+      apr_uint64_t end = cache->l2.next == NO_INDEX
+                       ? cache->l2.start_offset + cache->l2.size
+                       : get_entry(cache, cache->l2.next)->offset;
 
       /* leave function as soon as the insertion window is large enough
        */
-      if (end >= size + cache->current_data)
+      if (end >= to_fit_in->size + cache->l2.current_data)
         return TRUE;
 
-      /* Don't be too eager to cache data. Smaller items will fit into
-       * the cache after dropping a single item. Of the larger ones, we
-       * will only accept about 50%. They are also likely to get evicted
-       * soon due to their notoriously low hit counts.
-       *
-       * As long as enough similarly or even larger sized entries already
-       * exist in the cache, much less insert requests will be rejected.
+      /* if the net worth (in hits) of the items removed so far already
+       * exceeds the hit count of the item we want to insert, reject
+       * TO_FIT_IN because it still does not fit in.
        */
-      if (2 * drop_size > size)
+      if (drop_hits > to_fit_in->hit_count)
         return FALSE;
 
       /* try to enlarge the insertion window
        */
-      if (cache->next == NO_INDEX)
+      if (cache->l2.next == NO_INDEX)
         {
           /* We reached the end of the data buffer; restart at the beginning.
            * Due to the randomized nature of our LFU implementation, very
            * large data items may require multiple passes. Therefore, SIZE
            * should be restricted to significantly less than data_size.
            */
-          cache->current_data = 0;
-          cache->next = cache->first;
+          cache->l2.current_data = cache->l2.start_offset;
+          cache->l2.next = cache->l2.first;
         }
       else
         {
-          entry = get_entry(cache, cache->next);
+          entry = get_entry(cache, cache->l2.next);
 
           /* Keep entries that are very small. Those are likely to be data
            * headers or similar management structures. So, they are probably
@@ -1061,14 +1190,24 @@ ensure_data_insertable(svn_membuffer_t *
             {
               move_entry(cache, entry);
             }
+          else if (cache->l2.next / GROUP_SIZE == idx / GROUP_SIZE)
+            {
+              /* Special case: we cannot drop entries that are in the same
+               * group as TO_FIT_IN because that might cause the latter to
+               * become invalidated if it happens to be the highest used
+               * entry in the group.  So, we must keep ENTRY unconditionally.
+               * (this is a very rare condition)
+               */
+              move_entry(cache, entry);
+            }
           else
             {
               svn_boolean_t keep;
 
               if (cache->hit_count > cache->used_entries)
                 {
-                  /* Roll the dice and determine a threshold somewhere from 0 up
-                   * to 2 times the average hit count.
+                  /* Roll the dice and determine a threshold somewhere from
+                   * 0 up to 2 times the average hit count.
                    */
                   average_hit_value = cache->hit_count / cache->used_entries;
                   threshold = (average_hit_value+1) * (rand() % 4096) / 2048;
@@ -1077,9 +1216,9 @@ ensure_data_insertable(svn_membuffer_t *
                 }
               else
                 {
-                  /* general hit count is low. Keep everything that got hit
-                   * at all and assign some 50% survival chance to everything
-                   * else.
+                  /* general hit count is low. Keep everything that got
+                   * hit at all and assign some 50% survival chance to
+                   * everything else.
                    */
                   keep = (entry->hit_count > 0) || (rand() & 1);
                 }
@@ -1087,15 +1226,16 @@ ensure_data_insertable(svn_membuffer_t *
               /* keepers or destroyers? */
               if (keep)
                 {
+                 /* Keep ENTRY and move the insertion window.
+                  */
                   move_entry(cache, entry);
                 }
               else
                 {
-                 /* Drop the entry from the end of the insertion window, if it
-                  * has been hit less than the threshold. Otherwise, keep it and
-                  * move the insertion window one entry further.
+                 /* Drop the entry from the end of the insertion window,
+                  * because its hit count is below the threshold.
                   */
-                  drop_size += entry->size;
+                  drop_hits += entry->hit_count;
                   drop_entry(cache, entry);
                 }
             }
@@ -1106,6 +1246,70 @@ ensure_data_insertable(svn_membuffer_t *
    * right answer. */
 }
 
+/* This function implements the cache insertion / eviction strategy for L1.
+ *
+ * If necessary, enlarge the insertion window of CACHE->L1 by promoting
+ * entries to L2 until it is at least SIZE bytes long.
+ *
+ * Return TRUE if enough room could be found or made.  A FALSE result
+ * indicates that the respective item shall not be added because it is
+ * too large.
+ */
+static svn_boolean_t
+ensure_data_insertable_l1(svn_membuffer_t *cache, apr_size_t size)
+{
+  entry_t *entry;
+
+  /* Guarantees that the while loop will terminate. */
+  if (size > cache->l1.size)
+    return FALSE;
+
+  /* This loop will eventually terminate because every cache entry
+   * will get promoted or dropped eventually.
+   */
+  while (1)
+    {
+      /* first offset behind the insertion window
+       */
+      apr_uint64_t end = cache->l1.next == NO_INDEX
+                       ? cache->l1.start_offset + cache->l1.size
+                       : get_entry(cache, cache->l1.next)->offset;
+
+      /* leave function as soon as the insertion window is large enough
+       */
+      if (end >= size + cache->l1.current_data)
+        return TRUE;
+
+      /* Enlarge the insertion window
+       */
+      if (cache->l1.next == NO_INDEX)
+        {
+          /* We reached the end of the data buffer; restart at the beginning.
+           * Due to the randomized nature of our LFU implementation, very
+           * large data items may require multiple passes. Therefore, SIZE
+           * should be restricted to significantly less than data_size.
+           */
+          cache->l1.current_data = cache->l1.start_offset;
+          cache->l1.next = cache->l1.first;
+        }
+      else
+        {
+          /* Remove the entry from the end of the insertion window and
+           * promote it to L2, if it is important enough.
+           */
+          entry = get_entry(cache, cache->l1.next);
+
+          if (ensure_data_insertable_l2(cache, entry, cache->l1.next))
+            promote_entry(cache, entry);
+          else
+            drop_entry(cache, entry);
+        }
+    }
+
+  /* This will never be reached. But if it was, "can't insert" was the
+   * right answer. */
+}
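
To see the L2 admission rule from ensure_data_insertable_l2() in
isolation: eviction stops as soon as the accumulated hit count of the
dropped victims exceeds the candidate's own hit count.  A standalone
sketch with made-up hit counts, ignoring the insertion-window geometry
handled above (not part of this commit):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* An item with hit_count 3 may evict victims worth at most 3
 * accumulated hits; the first check that fails rejects it. */
int main(void)
{
  uint32_t to_fit_in_hits = 3;
  uint32_t victim_hits[] = { 1, 0, 2, 5, 4 };
  uint32_t drop_hits = 0;
  int accepted = 1;
  size_t i;

  for (i = 0; i < sizeof(victim_hits) / sizeof(victim_hits[0]); i++)
    {
      if (drop_hits > to_fit_in_hits)
        {
          accepted = 0;            /* reject: eviction already too costly */
          break;
        }
      drop_hits += victim_hits[i]; /* drop one more victim */
    }

  /* victims worth 1 + 0 + 2 = 3 hits pass the check (3 > 3 is false),
   * one more victim (5 hits) is dropped, then 8 > 3 rejects the item */
  assert(drop_hits == 8 && accepted == 0);
  return 0;
}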
+
 /* Mimic apr_pcalloc in APR_POOL_DEBUG mode, i.e. handle failed allocations
  * (e.g. OOM) properly: Allocate at least SIZE bytes from POOL and zero
  * the content of the allocated memory if ZERO has been set. Return NULL
@@ -1158,7 +1362,7 @@ svn_cache__membuffer_cache_create(svn_me
     segment_count = MAX_SEGMENT_COUNT;
   if (segment_count * MIN_SEGMENT_SIZE > total_size)
     segment_count = total_size / MIN_SEGMENT_SIZE;
-    
+
   /* The segment count must be a power of two. Round it down as necessary.
    */
   while ((segment_count & (segment_count-1)) != 0)
@@ -1225,13 +1429,13 @@ svn_cache__membuffer_cache_create(svn_me
    */
   data_size = ALIGN_VALUE(total_size - directory_size + 1) - ITEM_ALIGNMENT;
 
-  /* For cache sizes > 4TB, individual cache segments will be larger
-   * than 16GB allowing for >4GB entries.  But caching chunks larger
-   * than 4GB is simply not supported.
+  /* For cache sizes > 16TB, individual cache segments will be larger
+   * than 32GB allowing for >4GB entries.  But caching chunks larger
+   * than 4GB is simply not supported.
    */
-  max_entry_size = data_size / 4 > MAX_ITEM_SIZE
+  max_entry_size = data_size / 8 > MAX_ITEM_SIZE
                  ? MAX_ITEM_SIZE
-                 : data_size / 4;
+                 : data_size / 8;
 
   /* to keep the entries small, we use 32 bit indexes only
   * -> we need to ensure that no more than 4G entries exist.
@@ -1259,13 +1463,25 @@ svn_cache__membuffer_cache_create(svn_me
          hence "unused" */
       c[seg].group_initialized = apr_pcalloc(pool, group_init_size);
 
-      c[seg].first = NO_INDEX;
-      c[seg].last = NO_INDEX;
-      c[seg].next = NO_INDEX;
+      /* Allocate one quarter of the data buffer to L1
+       */
+      c[seg].l1.first = NO_INDEX;
+      c[seg].l1.last = NO_INDEX;
+      c[seg].l1.next = NO_INDEX;
+      c[seg].l1.start_offset = 0;
+      c[seg].l1.size = ALIGN_VALUE(data_size / 4);
+      c[seg].l1.current_data = 0;
+
+      /* The remaining three quarters will be used as L2
+       */
+      c[seg].l2.first = NO_INDEX;
+      c[seg].l2.last = NO_INDEX;
+      c[seg].l2.next = NO_INDEX;
+      c[seg].l2.start_offset = c[seg].l1.size;
+      c[seg].l2.size = data_size - c[seg].l1.size;
+      c[seg].l2.current_data = c[seg].l2.start_offset;
 
-      c[seg].data_size = data_size;
       c[seg].data = secure_aligned_alloc(pool, (apr_size_t)data_size, FALSE);
-      c[seg].current_data = 0;
       c[seg].data_used = 0;
       c[seg].max_entry_size = max_entry_size;
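
Putting the sizing rules for one segment together (hypothetical 64 MB
per-segment data buffer; ALIGN_VALUE() rounding and the MAX_ITEM_SIZE cap
are ignored for simplicity):

#include <assert.h>
#include <stdint.h>

/* One segment's layout under the new scheme: a quarter of the data
 * buffer for L1, the rest for L2, and entries capped at one eighth. */
int main(void)
{
  uint64_t data_size = 64 << 20;           /* hypothetical segment size */

  uint64_t l1_size = data_size / 4;        /* 16 MB for the L1 FIFO */
  uint64_t l2_start = l1_size;             /* L2 follows immediately */
  uint64_t l2_size = data_size - l1_size;  /* 48 MB for L2 */
  uint64_t max_entry_size = data_size / 8; /* 8 MB per cached item */

  assert(l1_size == (16 << 20));
  assert(l2_start == (16 << 20) && l2_size == (48 << 20));
  assert(max_entry_size == (8 << 20));
  return 0;
}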
 
@@ -1282,7 +1498,7 @@ svn_cache__membuffer_cache_create(svn_me
         {
           /* We are OOM. There is no need to proceed with "half a cache".
            */
-          return svn_error_wrap_apr(APR_ENOMEM, _("OOM"));
+          return svn_error_wrap_apr(APR_ENOMEM, "OOM");
         }
 
 #if APR_HAS_THREADS
@@ -1397,7 +1613,7 @@ membuffer_cache_set_internal(svn_membuff
    */
   if (   buffer != NULL
       && cache->max_entry_size >= size
-      && ensure_data_insertable(cache, size))
+      && ensure_data_insertable_l1(cache, size))
     {
       /* Remove old data for this key, if that exists.
       * Get an unused entry for the key and initialize it with
@@ -1405,7 +1621,7 @@ membuffer_cache_set_internal(svn_membuff
        */
       entry = find_entry(cache, group_index, to_find, TRUE);
       entry->size = size;
-      entry->offset = cache->current_data;
+      entry->offset = cache->l1.current_data;
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
@@ -1758,13 +1974,13 @@ membuffer_cache_set_partial_internal(svn
                */
               drop_entry(cache, entry);
               if (   (cache->max_entry_size >= size)
-                  && ensure_data_insertable(cache, size))
+                  && ensure_data_insertable_l1(cache, size))
                 {
                   /* Write the new entry.
                    */
                   entry = find_entry(cache, group_index, to_find, TRUE);
                   entry->size = size;
-                  entry->offset = cache->current_data;
+                  entry->offset = cache->l1.current_data;
                   if (size)
                     memcpy(cache->data + entry->offset, data, size);
 
@@ -1829,6 +2045,22 @@ membuffer_cache_set_partial(svn_membuffe
  * svn_cache__t instance.
  */
 
+/* Stores the combined key value for the given key.  It will be used by
+ * combine_key() to short-circuit expensive hash calculations.
+ */
+typedef struct last_access_key_t
+{
+  /* result of key combining */
+  entry_key_t combined_key;
+
+  /* length of the key (or APR_HASH_KEY_STRING if not used) */
+  apr_size_t key_len;
+
+  /* the original key.  Only KEY_LEN bytes are valid.  We use uint32 for
+   * better compatibility with pseudo-md5 functions. */
+  apr_uint32_t key[64];
+} last_access_key_t;
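
The struct above memoizes the most recent key-to-combined-key mapping.  A
generic standalone sketch of that short-circuit pattern, shrunk to an
8-byte combined key and using FNV-1a as a stand-in for the (pseudo-)MD5
hashes (hypothetical code, not part of this commit):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Repeated lookups with an unchanged key reuse the cached result
 * instead of re-hashing. */
typedef struct
{
  uint64_t combined_key;
  size_t key_len;                  /* (size_t)-1 while unused */
  char key[64];
} last_access_t;

static uint64_t
slow_hash(const char *key, size_t len)   /* stand-in for (pseudo-)md5 */
{
  uint64_t h = 0xcbf29ce484222325ull;    /* FNV-1a, for illustration */
  size_t i;
  for (i = 0; i < len; i++)
    h = (h ^ (unsigned char)key[i]) * 0x100000001b3ull;
  return h;
}

static uint64_t
combine(last_access_t *la, const char *key, size_t len)
{
  if (len == la->key_len && memcmp(key, la->key, len) == 0)
    return la->combined_key;             /* short-circuit: same key */

  la->combined_key = slow_hash(key, len);
  if (len <= sizeof(la->key))
    {
      la->key_len = len;
      memcpy(la->key, key, len);
    }
  return la->combined_key;
}

int main(void)
{
  last_access_t la = { 0, (size_t)-1, { 0 } };
  uint64_t first = combine(&la, "trunk/README", 12);
  uint64_t again = combine(&la, "trunk/README", 12);  /* cache hit */
  assert(first == again);
  return 0;
}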
+
 /* Internal cache structure (used in svn_cache__t.cache_internal) basically
  * holding the additional parameters needed to call the respective membuffer
  * functions.
@@ -1876,6 +2108,11 @@ typedef struct svn_membuffer_cache_t
    */
   int alloc_counter;
 
+  /* cache for the last key used.
+   * Will be NULL for caches with short fixed-size keys.
+   */
+  last_access_key_t *last_access;
+
   /* if enabled, this will serialize the access to this instance.
    */
   svn_mutex__t *mutex;
@@ -1893,46 +2130,127 @@ typedef struct svn_membuffer_cache_t
  */
 #define ALLOCATIONS_PER_POOL_CLEAR 10
 
-
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
  * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
+ * This could replace combine_key() entirely but we actually use it only
+ * when the quick path in combine_key() does not apply.
  */
 static void
-combine_key(svn_membuffer_cache_t *cache,
-            const void *key,
-            apr_ssize_t key_len)
+combine_long_key(svn_membuffer_cache_t *cache,
+                 const void *key,
+                 apr_ssize_t key_len)
 {
+  assert(cache->last_access);
+
+  /* handle variable-length keys */
   if (key_len == APR_HASH_KEY_STRING)
     key_len = strlen((const char *) key);
 
-  if (key_len < 16)
+  /* same key as the last time? -> short-circuit */
+  if (   key_len == cache->last_access->key_len
+      && memcmp(key, cache->last_access->key, key_len) == 0)
     {
-      apr_uint32_t data[4] = { 0 };
-      memcpy(data, key, key_len);
+      memcpy(cache->combined_key, cache->last_access->combined_key,
+             sizeof(cache->combined_key));
+    }
+  else if (key_len >= 64)
+    {
+      /* relatively long key.  Use the generic, slow hash code for it */
+      apr_md5((unsigned char*)cache->combined_key, key, key_len);
+      cache->combined_key[0] ^= cache->prefix[0];
+      cache->combined_key[1] ^= cache->prefix[1];
 
-      svn__pseudo_md5_15((apr_uint32_t *)cache->combined_key, data);
+      /* is the key short enough to cache the result? */
+      if (key_len <= sizeof(cache->last_access->key))
+        {
+          memcpy(cache->last_access->combined_key, cache->combined_key,
+                 sizeof(cache->combined_key));
+          cache->last_access->key_len = key_len;
+          memcpy(cache->last_access->key, key, key_len);
+        }
     }
-  else if (key_len < 32)
+  else
     {
-      apr_uint32_t data[8] = { 0 };
-      memcpy(data, key, key_len);
+      /* shorter keys use efficient hash code and *do* cache the results */
+      cache->last_access->key_len = key_len;
+      if (key_len < 16)
+        {
+          memset(cache->last_access->key, 0, 16);
+          memcpy(cache->last_access->key, key, key_len);
 
-      svn__pseudo_md5_31((apr_uint32_t *)cache->combined_key, data);
+          svn__pseudo_md5_15((apr_uint32_t *)cache->combined_key,
+                             cache->last_access->key);
+        }
+      else if (key_len < 32)
+        {
+          memset(cache->last_access->key, 0, 32);
+          memcpy(cache->last_access->key, key, key_len);
+
+          svn__pseudo_md5_31((apr_uint32_t *)cache->combined_key,
+                             cache->last_access->key);
+        }
+      else
+        {
+          memset(cache->last_access->key, 0, 64);
+          memcpy(cache->last_access->key, key, key_len);
+
+          svn__pseudo_md5_63((apr_uint32_t *)cache->combined_key,
+                             cache->last_access->key);
+        }
+
+      cache->combined_key[0] ^= cache->prefix[0];
+      cache->combined_key[1] ^= cache->prefix[1];
+
+      memcpy(cache->last_access->combined_key, cache->combined_key,
+             sizeof(cache->combined_key));
     }
-  else if (key_len < 64)
+}
+
+/* Basically calculate a hash value for KEY of length KEY_LEN, combine it
+ * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
+ */
+static void
+combine_key(svn_membuffer_cache_t *cache,
+            const void *key,
+            apr_ssize_t key_len)
+{
+  /* copy of *key, padded with 0 */
+  apr_uint64_t data[2];
+
+  /* short, fixed-size keys are the most common case */
+  if (key_len == 16)
+    {
+      data[0] = ((const apr_uint64_t *)key)[0];
+      data[1] = ((const apr_uint64_t *)key)[1];
+    }
+  else if (key_len == 8)
+    {
+      data[0] = ((const apr_uint64_t *)key)[0];
+      data[1] = 0;
+    }
+  else if (key_len != APR_HASH_KEY_STRING && key_len < 16)
     {
-      apr_uint32_t data[16] = { 0 };
+      data[0] = 0;
+      data[1] = 0;
       memcpy(data, key, key_len);
-
-      svn__pseudo_md5_63((apr_uint32_t *)cache->combined_key, data);
     }
   else
     {
-      apr_md5((unsigned char*)cache->combined_key, key, key_len);
+      /* longer or variably sized keys */
+      combine_long_key(cache, key, key_len);
+      return;
     }
 
-  cache->combined_key[0] ^= cache->prefix[0];
-  cache->combined_key[1] ^= cache->prefix[1];
+  /* scramble key DATA.  All of this must be reversible to prevent key
+   * collisions.  So, we limit ourselves to xor and permutations. */
+  data[1] = (data[1] << 27) | (data[1] >> 37);
+  data[0] = (data[0] << 43) | (data[0] >> 21);
+  data[1] ^= data[0] & 0xffff;
+  data[0] ^= data[1] & 0xffffffffffff0000ull;
+
+  /* combine with this cache's namespace */
+  cache->combined_key[0] = data[0] ^ cache->prefix[0];
+  cache->combined_key[1] = data[1] ^ cache->prefix[1];
 }
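
The scramble must be bijective so that distinct 16-byte keys remain
distinct before the prefix is applied.  A standalone sketch (not part of
this commit) verifying that the rotate/XOR mix round-trips through its
inverse:

#include <assert.h>
#include <stdint.h>

/* Apply the same mix as combine_key() above, then undo it step by
 * step in reverse order.  A successful round-trip shows the mix is
 * a bijection and thus cannot introduce collisions by itself. */
static void
scramble(uint64_t d[2])
{
  d[1] = (d[1] << 27) | (d[1] >> 37);
  d[0] = (d[0] << 43) | (d[0] >> 21);
  d[1] ^= d[0] & 0xffff;
  d[0] ^= d[1] & 0xffffffffffff0000ull;
}

static void
unscramble(uint64_t d[2])
{
  d[0] ^= d[1] & 0xffffffffffff0000ull;
  d[1] ^= d[0] & 0xffff;
  d[0] = (d[0] >> 43) | (d[0] << 21);
  d[1] = (d[1] >> 27) | (d[1] << 37);
}

int main(void)
{
  uint64_t d[2] = { 0x0123456789abcdefull, 0xfedcba9876543210ull };
  uint64_t orig[2] = { d[0], d[1] };

  scramble(d);
  unscramble(d);
  assert(d[0] == orig[0] && d[1] == orig[1]);
  return 0;
}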
 
 /* Implement svn_cache__vtable_t.get (not thread-safe)
@@ -2112,9 +2430,9 @@ static svn_error_t *
 svn_membuffer_get_segment_info(svn_membuffer_t *segment,
                                svn_cache__info_t *info)
 {
-  info->data_size += segment->data_size;
+  info->data_size += segment->l1.size + segment->l2.size;
   info->used_size += segment->data_used;
-  info->total_size += segment->data_size +
+  info->total_size += segment->l1.size + segment->l2.size +
       segment->group_count * GROUP_SIZE * sizeof(entry_t);
 
   info->used_entries += segment->used_entries;
@@ -2347,6 +2665,18 @@ svn_cache__create_membuffer_cache(svn_ca
                        pool));
   memcpy(cache->prefix, checksum->digest, sizeof(cache->prefix));
 
+  /* Fixed-length keys of 16 bytes or less don't need a buffer because we
+   * can use a very fast key combining algorithm. */
+  if ((klen == APR_HASH_KEY_STRING) || klen > sizeof(entry_key_t))
+    {
+      cache->last_access = apr_pcalloc(pool, sizeof(*cache->last_access));
+      cache->last_access->key_len = APR_HASH_KEY_STRING;
+    }
+  else
+    {
+      cache->last_access = NULL;
+    }
+
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
   /* Initialize cache debugging support.
@@ -2362,6 +2692,7 @@ svn_cache__create_membuffer_cache(svn_ca
   wrapper->cache_internal = cache;
   wrapper->error_handler = 0;
   wrapper->error_baton = 0;
+  wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");
 
   *cache_p = wrapper;
   return SVN_NO_ERROR;

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-memcache.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-memcache.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-memcache.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/cache-memcache.c Wed Jun  5 09:22:43 2013
@@ -407,6 +407,7 @@ svn_cache__create_memcache(svn_cache__t 
   wrapper->cache_internal = cache;
   wrapper->error_handler = 0;
   wrapper->error_baton = 0;
+  wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");
 
   *cache_p = wrapper;
   return SVN_NO_ERROR;

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.c
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.c?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.c (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.c Wed Jun  5 09:22:43 2013
@@ -76,7 +76,7 @@ svn_cache__get(void **value_p,
      out with FOUND set to false. */
   *found = FALSE;
 #ifdef SVN_DEBUG
-  if (getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"))
+  if (cache->pretend_empty)
     return SVN_NO_ERROR;
 #endif
 
@@ -119,7 +119,7 @@ svn_cache__iter(svn_boolean_t *completed
                 apr_pool_t *scratch_pool)
 {
 #ifdef SVN_DEBUG
-  if (getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"))
+  if (cache->pretend_empty)
     /* Pretend CACHE is empty. */
     return SVN_NO_ERROR;
 #endif
@@ -146,7 +146,7 @@ svn_cache__get_partial(void **value,
   out with FOUND set to false. */
   *found = FALSE;
 #ifdef SVN_DEBUG
-  if (getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"))
+  if (cache->pretend_empty)
     return SVN_NO_ERROR;
 #endif
 

Modified: subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.h
URL: http://svn.apache.org/viewvc/subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.h?rev=1489765&r1=1489764&r2=1489765&view=diff
==============================================================================
--- subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.h (original)
+++ subversion/branches/verify-keep-going/subversion/libsvn_subr/cache.h Wed Jun  5 09:22:43 2013
@@ -99,6 +99,10 @@ struct svn_cache__t {
 
   /* Total number of function calls that returned an error. */
   apr_uint64_t failures;
+
+  /* Cause all getters to act as though the cache contains no data.
+     (Currently this never becomes set except in maintainer builds.) */
+  svn_boolean_t pretend_empty;
 };
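
Hoisting the getenv() call into cache creation (see the cache-membuffer.c
and cache-memcache.c hunks above) means the getters in cache.c now test a
plain struct member instead of querying the environment on every call, and
the pretend-empty behavior stays fixed for the lifetime of each cache
instance.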
 
 


