subversion-commits mailing list archives

From kot...@apache.org
Subject svn commit: r1757529 - /subversion/trunk/subversion/mod_dav_svn/repos.c
Date Wed, 24 Aug 2016 14:12:16 GMT
Author: kotkov
Date: Wed Aug 24 14:12:16 2016
New Revision: 1757529

URL: http://svn.apache.org/viewvc?rev=1757529&view=rev
Log:
In mod_dav_svn, reuse a single bucket brigade when responding to GET
requests without an "X-SVN-VR-Base" header, i.e., without a delta base.

Previously, we were creating a new brigade for every chunk of data, which
caused excessive memory usage of around 7-8 MB while serving an 800 MB
response.

This was pointed out by Joe Orton <jorton@redhat.com> as part of the
discussion in http://svn.haxx.se/dev/archive-2016-08/0028.shtml

* subversion/mod_dav_svn/repos.c
  (deliver): Rewrite the code to reuse a single bucket brigade within the
   loop, and properly call apr_brigade_destroy() when the brigade is no
   longer required.
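
[Editor's note: below is a minimal, self-contained sketch of the
brigade-reuse pattern described above. It is not the mod_dav_svn code
itself: the helper name send_stream_chunks(), the read_fn callback, and
READ_CHUNK_SIZE are illustrative assumptions, while the APR/httpd calls
mirror the ones appearing in the diff further down.]

    #include <httpd.h>
    #include <apr_buckets.h>
    #include <util_filter.h>

    #define READ_CHUNK_SIZE 16384  /* illustrative chunk size */

    /* Read fixed-size chunks via read_fn() and pass them down the output
     * filter chain, reusing a single brigade for the whole transfer. */
    static apr_status_t
    send_stream_chunks(ap_filter_t *output, apr_pool_t *pool,
                       apr_size_t (*read_fn)(char *buf, apr_size_t len))
    {
      char *block = apr_palloc(pool, READ_CHUNK_SIZE);

      /* Create the brigade once, outside the loop. */
      apr_bucket_brigade *bb = apr_brigade_create(pool,
                                                  output->c->bucket_alloc);
      apr_bucket *bkt;
      apr_status_t status;

      while (1)
        {
          apr_size_t bufsize = read_fn(block, READ_CHUNK_SIZE);
          if (bufsize == 0)
            break;

          /* Wrap the chunk in a transient bucket and send it. */
          bkt = apr_bucket_transient_create(block, bufsize,
                                            output->c->bucket_alloc);
          APR_BRIGADE_INSERT_TAIL(bb, bkt);
          status = ap_pass_brigade(output, bb);
          if (status != APR_SUCCESS)
            {
              apr_brigade_destroy(bb);
              return status;
            }

          /* Empty the brigade so it can be reused on the next iteration,
           * instead of allocating a new one per chunk. */
          apr_brigade_cleanup(bb);
        }

      /* Terminate the response with an EOS bucket on the same brigade. */
      bkt = apr_bucket_eos_create(output->c->bucket_alloc);
      APR_BRIGADE_INSERT_TAIL(bb, bkt);
      status = ap_pass_brigade(output, bb);

      apr_brigade_destroy(bb);
      return status;
    }

The key distinction is that apr_brigade_cleanup() only deletes the buckets,
leaving the brigade allocated and ready for reuse on the next iteration,
whereas apr_brigade_destroy() also releases the brigade itself and removes
its pool cleanup once the response (or an error path) is finished.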

Modified:
    subversion/trunk/subversion/mod_dav_svn/repos.c

Modified: subversion/trunk/subversion/mod_dav_svn/repos.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/mod_dav_svn/repos.c?rev=1757529&r1=1757528&r2=1757529&view=diff
==============================================================================
--- subversion/trunk/subversion/mod_dav_svn/repos.c (original)
+++ subversion/trunk/subversion/mod_dav_svn/repos.c Wed Aug 24 14:12:16 2016
@@ -3826,6 +3826,8 @@ deliver(const dav_resource *resource, ap
          ### which will read from the FS stream on demand */
 
       block = apr_palloc(resource->pool, SVN__STREAM_CHUNK_SIZE);
+      bb = apr_brigade_create(resource->pool, output->c->bucket_alloc);
+
       while (1) {
         apr_size_t bufsize = SVN__STREAM_CHUNK_SIZE;
 
@@ -3833,6 +3835,7 @@ deliver(const dav_resource *resource, ap
         serr = svn_stream_read_full(stream, block, &bufsize);
         if (serr != NULL)
           {
+            apr_brigade_destroy(bb);
             return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                         "could not read the file contents",
                                         resource->pool);
@@ -3840,30 +3843,32 @@ deliver(const dav_resource *resource, ap
         if (bufsize == 0)
           break;
 
-        /* build a brigade and write to the filter ... */
-        bb = apr_brigade_create(resource->pool, output->c->bucket_alloc);
+        /* write to the filter ... */
         bkt = apr_bucket_transient_create(block, bufsize,
                                           output->c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(bb, bkt);
         if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS) {
+          apr_brigade_destroy(bb);
           /* ### that HTTP code... */
           return dav_svn__new_error(resource->pool,
                                     HTTP_INTERNAL_SERVER_ERROR, 0, status,
                                     "Could not write data to filter.");
         }
+        apr_brigade_cleanup(bb);
       }
 
       /* done with the file. write an EOS bucket now. */
-      bb = apr_brigade_create(resource->pool, output->c->bucket_alloc);
       bkt = apr_bucket_eos_create(output->c->bucket_alloc);
       APR_BRIGADE_INSERT_TAIL(bb, bkt);
       if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS) {
+        apr_brigade_destroy(bb);
         /* ### that HTTP code... */
         return dav_svn__new_error(resource->pool,
                                   HTTP_INTERNAL_SERVER_ERROR, 0, status,
                                   "Could not write EOS to filter.");
       }
 
+      apr_brigade_destroy(bb);
       return NULL;
     }
 }


