httpd-dev mailing list archives

From Plüm, Rüdiger, Vodafone Group <ruediger.pl...@vodafone.com>
Subject RE: PR 58267: Regression in 2.2.31 caused by r1680920
Date Tue, 25 Aug 2015 12:41:17 GMT
Thanks for the pointer. It is missing because I accidentally removed it when stripping some
debug code I had set up locally for analysing the issue from the patch I posted. I will post a
proper version and, if you agree, put it in STATUS for 2.2.x. As far as I can tell this change
only applies to 2.2.x, so it would be fine to propose it directly in STATUS without any trunk
commit.
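
For reference, the only thing missing from the posted patch is the local declaration in the
proxy_util.c hunk; roughly something like the following (exact placement in the proper patch
may differ):

    server_rec *id_server;  /* server_rec whose identity is hashed into the worker's id */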

Regards

Rüdiger

> -----Original Message-----
> From: Jan Kaluža [mailto:jkaluza@redhat.com]
> Sent: Dienstag, 25. August 2015 14:15
> To: dev@httpd.apache.org
> Subject: Re: PR 58267: Regression in 2.2.31 caused by r1680920
> 
> On 08/25/2015 11:39 AM, Plüm, Rüdiger, Vodafone Group wrote:
> > How about the following patch? It uses the server_rec of the server that
> originally created the configuration item.
> 
> This looks like a good strategy. I've verified that the patch fixes this
> issue and does not break anything in my brief testing.
> 
> What do you think, Yann?
> 
> Note that the "id_server" declaration is missing in the patch. Otherwise
> it's OK.
> 
> Regards,
> Jan Kaluza
> 
> > Index: modules/proxy/proxy_util.c
> > ===================================================================
> > --- modules/proxy/proxy_util.c	(revision 1697578)
> > +++ modules/proxy/proxy_util.c	(working copy)
> > @@ -1460,6 +1460,7 @@
> >       (*worker)->flush_packets = flush_off;
> >       (*worker)->flush_wait = PROXY_FLUSH_WAIT;
> >       (*worker)->smax = -1;
> > +    (*worker)->server = conf->s;
> >       /* Increase the total worker count */
> >       proxy_lb_workers++;
> >       init_conn_pool(p, *worker);
> > @@ -1824,14 +1829,20 @@
> >               apr_md5_update(&ctx, (unsigned char *)balancer->name,
> >                              strlen(balancer->name));
> >           }
> > -        if (server) {
> > +        if (worker->server) {
> > +            id_server = worker->server;
> > +        }
> > +        else {
> > +            id_server = server;
> > +        }
> > +        if (id_server) {
> >               server_addr_rec *addr;
> >               /* Assumes the unique identifier of a vhost is its address(es)
> >                * plus the ServerName:Port. Should two or more vhosts have this
> >                * same identifier, the first one would always be elected to
> >                * handle the requests, so this shouldn't be an issue...
> >                */
> > -            for (addr = server->addrs; addr; addr = addr->next) {
> > +            for (addr = id_server->addrs; addr; addr = addr->next) {
> >                   char host_ip[64]; /* for any IPv[46] string */
> >                   apr_sockaddr_ip_getbuf(host_ip, sizeof host_ip,
> >                                          addr->host_addr);
> > @@ -1840,10 +1851,10 @@
> >                   apr_md5_update(&ctx, (unsigned char *)&addr->host_port,
> >                                  sizeof(addr->host_port));
> >               }
> > -            apr_md5_update(&ctx, (unsigned char *)server->server_hostname,
> > -                           strlen(server->server_hostname));
> > -            apr_md5_update(&ctx, (unsigned char *)&server->port,
> > -                           sizeof(server->port));
> > +            apr_md5_update(&ctx, (unsigned char *)id_server->server_hostname,
> > +                           strlen(id_server->server_hostname));
> > +            apr_md5_update(&ctx, (unsigned char *)&id_server->port,
> > +                           sizeof(id_server->port));
> >           }
> >           apr_md5_final(digest, &ctx);
> >
> > Index: modules/proxy/mod_proxy.c
> > ===================================================================
> > --- modules/proxy/mod_proxy.c	(revision 1697578)
> > +++ modules/proxy/mod_proxy.c	(working copy)
> > @@ -1129,6 +1129,7 @@
> >       ps->badopt = bad_error;
> >       ps->badopt_set = 0;
> >       ps->pool = p;
> > +    ps->s = s;
> >
> >       return ps;
> >   }
> > @@ -1172,6 +1173,7 @@
> >       ps->proxy_status = (overrides->proxy_status_set == 0) ? base->proxy_status : overrides->proxy_status;
> >       ps->proxy_status_set = overrides->proxy_status_set || base->proxy_status_set;
> >       ps->pool = p;
> > +    ps->s = overrides->s;
> >       return ps;
> >   }
> >
> > Index: modules/proxy/mod_proxy.h
> > ===================================================================
> > --- modules/proxy/mod_proxy.h	(revision 1697578)
> > +++ modules/proxy/mod_proxy.h	(working copy)
> > @@ -193,6 +193,7 @@
> >       } proxy_status;             /* Status display options */
> >       char proxy_status_set;
> >       apr_pool_t *pool;           /* Pool used for allocating this struct */
> > +    server_rec *s;              /* The server_rec where this configuration was created in */
> >   } proxy_server_conf;
> >
> >
> > @@ -369,6 +370,7 @@
> >       char            disablereuse_set;
> >       apr_interval_time_t conn_timeout;
> >       char            conn_timeout_set;
> > +    server_rec      *server;    /* The server_rec where this configuration was created in */
> >   };
> >
> >   /*
> >
> >
> > Regards
> >
> > Rüdiger
> >
> >> -----Original Message-----
> >> From: Plüm, Rüdiger, Vodafone Group
> >> Sent: Dienstag, 25. August 2015 10:48
> >> To: dev@httpd.apache.org
> >> Subject: RE: PR 58267: Regression in 2.2.31 caused by r1680920
> >>
> >> I think the current state of 2.2.31 breaks existing 2.2.x configurations
> >> that worked prior to 2.2.31.
> >> Prior to 2.2.31 you could do the following:
> >>
> >> <Proxy Balancer://proxy1>
> >> BalancerMember ajp://127.0.0.1:7001
> >> BalancerMember ajp://127.0.0.2:7002
> >> </Proxy>
> >>
> >> <virtualhost *:80>
> >> ProxyPass / balancer://proxy1/
> >> </virtualhost>
> >>
> >> <virtualhost 127.0.0.1:8080>
> >>
> >> <Location /bmanager>
> >>     sethandler balancer-manager
> >> </Location>
> >>
> >> </virtualhost>
> >>
> >> With this configuration you could provide your reverse proxy to the
> >> outside world while having the balancer-manager that manages the balancer
> >> listen only on localhost. This is no longer possible with 2.2.31, because
> >> worker->s is now different in each virtualhost, whereas before it was the
> >> same, since the worker->id was used to identify the scoreboard slot.
> >> The patches proposed so far would not change this. I think that in order
> >> to revert to the old behaviour we need to store with each worker the
> >> server_rec context in which it was created, e.g. by adding a const char *
> >> field to the worker that would be filled with server->server_hostname.
> >> Then we could use this value for creating the md5.
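> >> An untested sketch of that idea (the field name below is only illustrative,
> >> not a final proposal):
> >>
> >>     /* mod_proxy.h, struct proxy_worker: remember who created the worker */
> >>     const char      *context_host;  /* server_hostname of the creating server_rec */
> >>
> >>     /* wherever the worker is created and a server_rec is at hand: */
> >>     worker->context_host = server->server_hostname;
> >>
> >>     /* in the md5 computation, hash worker->context_host (when set)
> >>      * instead of server->server_hostname. */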
> >>
> >> Regards
> >>
> >> Rüdiger
> >>
> >>> -----Original Message-----
> >>> From: Jan Kaluža [mailto:jkaluza@redhat.com]
> >>> Sent: Dienstag, 25. August 2015 10:23
> >>> To: dev@httpd.apache.org
> >>> Subject: Re: PR 58267: Regression in 2.2.31 caused by r1680920
> >>>
> >>> On 08/24/2015 11:12 PM, Yann Ylavic wrote:
> >>>> On Mon, Aug 24, 2015 at 5:51 PM, Yann Ylavic <ylavic.dev@gmail.com>
> >>> wrote:
> >>>>>
> >>>>> On Mon, Aug 24, 2015 at 4:47 PM, Jan Kaluža <jkaluza@redhat.com>
> >> wrote:
> >>>>>>
> >>>>>> 2) Increment proxy_lb_workers according to number of workers in balancer
> >>>>>> when using "ProxyPass /foobar/ Balancer://foobar/" in the VirtualHost.
> >>>>>> The scoreboard would have right size and ap_proxy_set_scoreboard_lb
> >>>>>> would not fail then.
> >>>>>
> >>>>> I think we can do this quite easily in merge_proxy_config(), by
> >>>>> incrementing proxy_lb_workers for each base->balancers->workers. I did
> >>>>> not test it yet though.
> >>>>
> >>>> I tested the below which seems to work.
> >>>
> >>> Hm, this reserves the slots in the scoreboard even when the balancers are
> >>> not used in the virtualhost, or am I wrong?
> >>>
> >>> I originally thought about increasing proxy_lb_workers only when they
> >>> are used in the ProxyPass* in the vhost.
> >>>
> >>>> Index: modules/proxy/mod_proxy.c
> >>>> ===================================================================
> >>>> --- modules/proxy/mod_proxy.c    (revision 1697358)
> >>>> +++ modules/proxy/mod_proxy.c    (working copy)
> >>>> @@ -1135,6 +1135,7 @@ static void * create_proxy_config(apr_pool_t *p, s
> >>>>
> >>>>    static void * merge_proxy_config(apr_pool_t *p, void *basev, void *overridesv)
> >>>>    {
> >>>> +    int i;
> >>>>        proxy_server_conf *ps = apr_pcalloc(p, sizeof(proxy_server_conf));
> >>>>        proxy_server_conf *base = (proxy_server_conf *) basev;
> >>>>        proxy_server_conf *overrides = (proxy_server_conf *) overridesv;
> >>>> @@ -1147,6 +1148,13 @@ static void * merge_proxy_config(apr_pool_t *p, vo
> >>>>        ps->allowed_connect_ports = apr_array_append(p, base->allowed_connect_ports, overrides->allowed_connect_ports);
> >>>>        ps->workers = apr_array_append(p, base->workers, overrides->workers);
> >>>>        ps->balancers = apr_array_append(p, base->balancers, overrides->balancers);
> >>>> +    /* The balancers inherited from base don't have their members reserved on
> >>>> +     * the scoreboard_lb for this server, account for them now.
> >>>> +     */
> >>>> +    for (i = 0; i < base->balancers->nelts; ++i) {
> >>>> +        proxy_balancer *balancer = (proxy_balancer *)base->balancers->elts + i;
> >>>> +        proxy_lb_workers += balancer->workers->nelts;
> >>>> +    }
> >>>>        ps->forward = overrides->forward ? overrides->forward : base->forward;
> >>>>        ps->reverse = overrides->reverse ? overrides->reverse : base->reverse;
> >>>>
> >>>> --
> >>>>
> >>>> Please note that since all the workers would really be accounted for in
> >>>> the scoreboard, configurations like the one in PR 58267 (with
> >>>> inherited balancers) would also need bigger SHMs (but no more) than
> >>>> currently...
> >>>>
> >>>> WDYT?
> >>>>
> >>>
> >>> Regards,
> >>> Jan Kaluza
> >>>
> >
