Diffstat (limited to 'net/sunrpc/backchannel_rqst.c')
-rw-r--r--	net/sunrpc/backchannel_rqst.c	103
1 files changed, 70 insertions, 33 deletions
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 890a29912d5..9761a0da964 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
 	free_page((unsigned long)xbufp->head[0].iov_base);
 	xbufp = &req->rq_snd_buf;
 	free_page((unsigned long)xbufp->head[0].iov_base);
-	list_del(&req->rq_bc_pa_list);
 	kfree(req);
 }
 
@@ -168,8 +167,10 @@ out_free:
 	/*
 	 * Memory allocation failed, free the temporary list
 	 */
-	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+		list_del(&req->rq_bc_pa_list);
 		xprt_free_allocation(req);
+	}
 
 	dprintk("RPC:       setup backchannel transport failed\n");
 	return -ENOMEM;
@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
 	xprt_dec_alloc_count(xprt, max_reqs);
 	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
 		dprintk("RPC:        req=%p\n", req);
+		list_del(&req->rq_bc_pa_list);
 		xprt_free_allocation(req);
 		if (--max_reqs == 0)
 			break;
@@ -210,39 +212,23 @@ out:
 }
 EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
 
-/*
- * One or more rpc_rqst structure have been preallocated during the
- * backchannel setup.  Buffer space for the send and private XDR buffers
- * has been preallocated as well.  Use xprt_alloc_bc_request to allocate
- * to this request.  Use xprt_free_bc_request to return it.
- *
- * We know that we're called in soft interrupt context, grab the spin_lock
- * since there is no need to grab the bottom half spin_lock.
- *
- * Return an available rpc_rqst, otherwise NULL if non are available.
- */
-struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 {
-	struct rpc_rqst *req;
+	struct rpc_rqst *req = NULL;
 
 	dprintk("RPC:       allocate a backchannel request\n");
-	spin_lock(&xprt->bc_pa_lock);
-	if (!list_empty(&xprt->bc_pa_list)) {
-		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
-				rq_bc_pa_list);
-		list_del(&req->rq_bc_pa_list);
-	} else {
-		req = NULL;
-	}
-	spin_unlock(&xprt->bc_pa_lock);
+	if (list_empty(&xprt->bc_pa_list))
+		goto not_found;
 
-	if (req != NULL) {
-		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
-		req->rq_reply_bytes_recvd = 0;
-		req->rq_bytes_sent = 0;
-		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+				rq_bc_pa_list);
+	req->rq_reply_bytes_recvd = 0;
+	req->rq_bytes_sent = 0;
+	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
 			sizeof(req->rq_private_buf));
-	}
+	req->rq_xid = xid;
+	req->rq_connect_cookie = xprt->connect_cookie;
+not_found:
 	dprintk("RPC:       backchannel req=%p\n", req);
 	return req;
 }
@@ -257,10 +243,11 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 
 	dprintk("RPC:       free backchannel req=%p\n", req);
 
-	smp_mb__before_clear_bit();
+	req->rq_connect_cookie = xprt->connect_cookie - 1;
+	smp_mb__before_atomic();
 	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
 	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	if (!xprt_need_to_requeue(xprt)) {
 		/*
@@ -279,7 +266,57 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 	 * may be reused by a new callback request.
 	 */
 	spin_lock_bh(&xprt->bc_pa_lock);
-	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+	list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
 	spin_unlock_bh(&xprt->bc_pa_lock);
 }
 
+/*
+ * One or more rpc_rqst structure have been preallocated during the
+ * backchannel setup.  Buffer space for the send and private XDR buffers
+ * has been preallocated as well.  Use xprt_alloc_bc_request to allocate
+ * to this request.  Use xprt_free_bc_request to return it.
+ *
+ * We know that we're called in soft interrupt context, grab the spin_lock
+ * since there is no need to grab the bottom half spin_lock.
+ *
+ * Return an available rpc_rqst, otherwise NULL if non are available.
+ */
+struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
+{
+	struct rpc_rqst *req;
+
+	spin_lock(&xprt->bc_pa_lock);
+	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+		if (req->rq_connect_cookie != xprt->connect_cookie)
+			continue;
+		if (req->rq_xid == xid)
+			goto found;
+	}
+	req = xprt_alloc_bc_request(xprt, xid);
+found:
+	spin_unlock(&xprt->bc_pa_lock);
+	return req;
+}
+
+/*
+ * Add callback request to callback list.  The callback
+ * service sleeps on the sv_cb_waitq waiting for new
+ * requests.  Wake it up after adding enqueing the
+ * request.
+ */
+void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
+{
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct svc_serv *bc_serv = xprt->bc_serv;
+
+	req->rq_private_buf.len = copied;
+	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+
+	dprintk("RPC:       add callback request to list\n");
+	spin_lock(&bc_serv->sv_cb_lock);
+	list_del(&req->rq_bc_pa_list);
+	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+	wake_up(&bc_serv->sv_cb_waitq);
+	spin_unlock(&bc_serv->sv_cb_lock);
+}
+
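
For context, a minimal sketch of how a transport receive path might consume the two helpers introduced by this patch, xprt_lookup_bc_request() and xprt_complete_bc_request(). The function name example_bc_receive, its parameters, and the elided copy step are illustrative assumptions and not part of the patch; the declarations are assumed to live in linux/sunrpc/bc_xprt.h.

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>	/* assumed home of the new declarations */

/* Illustrative sketch only, not part of this patch. */
static void example_bc_receive(struct rpc_xprt *xprt, __be32 xid,
			       uint32_t copied)
{
	struct rpc_rqst *req;

	/*
	 * Find the preallocated slot whose XID matches this callback;
	 * xprt_lookup_bc_request() falls back to picking a free slot
	 * from bc_pa_list under bc_pa_lock.
	 */
	req = xprt_lookup_bc_request(xprt, xid);
	if (req == NULL)
		return;	/* no preallocated slot available: drop the callback */

	/* ... copy the callback RPC data into req->rq_rcv_buf here ... */

	/*
	 * Hand the request to the callback service: this marks the slot
	 * in use, unlinks it from bc_pa_list, queues it on sv_cb_list
	 * and wakes sv_cb_waitq.
	 */
	xprt_complete_bc_request(req, copied);
}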
