diff --git a/native/mod_manager/mod_manager.c b/native/mod_manager/mod_manager.c
index 392dc367..b28251ac 100644
--- a/native/mod_manager/mod_manager.c
+++ b/native/mod_manager/mod_manager.c
@@ -1367,7 +1367,9 @@ static char *process_config(request_rec *r, char **ptr, int *errtype)
         return err_msg;
     }
     /* Node part */
+    ap_assert(loc_lock_nodes() == APR_SUCCESS);
     err_msg = process_config_node(ptr[i], ptr[i + 1], &nodeinfo, errtype);
+    loc_unlock_nodes();
     if (err_msg != NULL) {
         return err_msg;
     }
@@ -1412,8 +1414,7 @@ static char *process_config(request_rec *r, char **ptr, int *errtype)
         nodeinfo.mess.ResponseFieldSize = mconf->response_field_size;
     }
     /* Insert or update balancer description */
-    rv = loc_lock_nodes();
-    ap_assert(rv == APR_SUCCESS);
+    ap_assert(loc_lock_nodes() == APR_SUCCESS);
     if (insert_update_balancer(balancerstatsmem, &balancerinfo) != APR_SUCCESS) {
         loc_unlock_nodes();
         *errtype = TYPEMEM;
diff --git a/native/mod_proxy_cluster/mod_proxy_cluster.c b/native/mod_proxy_cluster/mod_proxy_cluster.c
index 428b0ca9..87eca491 100644
--- a/native/mod_proxy_cluster/mod_proxy_cluster.c
+++ b/native/mod_proxy_cluster/mod_proxy_cluster.c
@@ -1740,10 +1740,8 @@ static proxy_worker *internal_find_best_byrequests(const proxy_balancer *balance
 
     /* create workers for new nodes */
     if (!cache_share_for) {
-        ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
         update_workers_node(conf, r->pool, r->server, 1, node_table);
         check_workers(conf, r->server);
-        node_storage->unlock_nodes();
     }
 
     /* do this once now to avoid repeating find_node_context_host through loop iterations */
@@ -2992,8 +2990,10 @@ static proxy_worker *find_best_worker(const proxy_balancer *balancer, const prox
         return NULL;
     }
 
+    ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
     candidate = internal_find_best_byrequests(balancer, conf, r, domain, failoverdomain, vhost_table, context_table,
                                               node_table);
+    node_storage->unlock_nodes();
 
     if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
@@ -3240,7 +3240,6 @@ static int proxy_cluster_pre_request(proxy_worker **worker, proxy_balancer **bal
         check_workers(conf, r->server);
         node_storage->unlock_nodes();
         if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url, 0))) {
-            /* node_storage->unlock_nodes(); */
             ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, "proxy_cluster_pre_request: CLUSTER no balancer for %s",
                          *url);
             return DECLINED;
@@ -3250,8 +3249,9 @@ static int proxy_cluster_pre_request(proxy_worker **worker, proxy_balancer **bal
     }
 
     /* Step 2: find the session route */
-
+    ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
     runtime = find_session_route(*balancer, r, &route, &sticky, url, &domain, vhost_table, context_table, node_table);
+    node_storage->unlock_nodes();
 
     /* Lock the LoadBalancer
      * XXX: perhaps we need the process lock here
@@ -3332,9 +3332,6 @@ static int proxy_cluster_pre_request(proxy_worker **worker, proxy_balancer **bal
         *worker = runtime;
     }
 
-    (*worker)->s->busy++;
-    apr_pool_cleanup_register(r->pool, *worker, decrement_busy_count, apr_pool_cleanup_null);
-
     /* Also mark the context here note that find_best_worker set BALANCER_CONTEXT_ID */
     context_id = apr_table_get(r->subprocess_env, "BALANCER_CONTEXT_ID");
     ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
@@ -3346,7 +3343,9 @@ static int proxy_cluster_pre_request(proxy_worker **worker, proxy_balancer **bal
     /* XXX: Do we need the lock here??? */
     helper = (proxy_cluster_helper *)(*worker)->context;
     helper->count_active++;
+    (*worker)->s->busy++;
     node_storage->unlock_nodes();
+    apr_pool_cleanup_register(r->pool, *worker, decrement_busy_count, apr_pool_cleanup_null);
 
     /*
      * get_route_balancer already fills all of the notes and some subprocess_env
@@ -3469,6 +3468,7 @@ static int proxy_cluster_post_request(proxy_worker *worker, proxy_balancer *bala
                          worker->s->name,
 #endif
                          val);
+            worker->s->status |= PROXY_WORKER_IN_ERROR;
             worker->s->error_time = apr_time_now();
             break;