field | value | date
---|---|---
author | Tom Tucker <tom@opengridcomputing.com> | 2007-12-30 21:07:53 -0600 |
committer | J. Bruce Fields <bfields@citi.umich.edu> | 2008-02-01 16:42:11 -0500 |
commit | 7a18208383ab3f3ce4a1f4e0536acc9372523d81 (patch) | |
tree | 851a1cc29d753297c2e7cf4cb4a95c4af7868427 /net/sunrpc/svcsock.c | |
parent | bb5cf160b282644c4491afbf76fbc66f5dc35030 (diff) | |
svc: Make close transport independent
Move the sk_list and sk_ready fields from svc_sock into svc_xprt. This change also touches close handling, because these lists are walked by the services when they close all of their transports; moving the lists into svc_xprt is therefore combined with making close transport independent.

svc_force_close_socket has been changed to svc_close_all, which takes a list of transports as its argument (see the sketch after the tag block below). This removes some svc_sock internals knowledge from the services.
This code races with module removal and transport addition.
Thanks to Simon Holm Thøgersen for a compile fix.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: Simon Holm Thøgersen <odie@cs.aau.dk>
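
To make the new interface concrete, here is a minimal sketch of how a service tears its transports down once close is list-based. The call site shown (an svc_destroy()-style shutdown path in net/sunrpc/svc.c) is outside the svcsock.c diff on this page, so the surrounding function and the forward declaration are illustrative assumptions, not part of this patch.

```c
#include <linux/list.h>
#include <linux/sunrpc/svc.h>

/* Added to net/sunrpc/svcsock.c by this patch; declared here only to
 * keep the sketch self-contained. */
void svc_close_all(struct list_head *xprt_list);

/* Hypothetical shutdown helper modelling what a service's destroy path
 * would do with the new list-based close. */
static void example_svc_shutdown(struct svc_serv *serv)
{
	/* Close the temporary (per-connection) transports first ... */
	svc_close_all(&serv->sv_tempsocks);
	/* ... then the permanent listeners. */
	svc_close_all(&serv->sv_permsocks);
}
```

Because svc_close_all() only needs a struct list_head, the generic svc code no longer has to know that the entries on sv_tempsocks and sv_permsocks happen to be svc_socks; any transport that embeds a svc_xprt can sit on those lists.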
Diffstat (limited to 'net/sunrpc/svcsock.c')
-rw-r--r-- | net/sunrpc/svcsock.c | 104 |
1 file changed, 50 insertions(+), 54 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f86538e1dec..6f63a5ca6a9 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -81,11 +81,11 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
                                          int *errp, int flags);
-static void svc_delete_socket(struct svc_sock *svsk);
+static void svc_delete_xprt(struct svc_xprt *xprt);
 static void svc_udp_data_ready(struct sock *, int);
 static int svc_udp_recvfrom(struct svc_rqst *);
 static int svc_udp_sendto(struct svc_rqst *);
-static void svc_close_socket(struct svc_sock *svsk);
+static void svc_close_xprt(struct svc_xprt *xprt);
 static void svc_sock_detach(struct svc_xprt *);
 static void svc_sock_free(struct svc_xprt *);
 
@@ -293,7 +293,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
         wake_up(&rqstp->rq_wait);
     } else {
         dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
-        list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
+        list_add_tail(&svsk->sk_xprt.xpt_ready, &pool->sp_sockets);
         BUG_ON(svsk->sk_xprt.xpt_pool != pool);
     }
 
@@ -313,8 +313,8 @@ svc_sock_dequeue(struct svc_pool *pool)
         return NULL;
 
     svsk = list_entry(pool->sp_sockets.next,
-              struct svc_sock, sk_ready);
-    list_del_init(&svsk->sk_ready);
+              struct svc_sock, sk_xprt.xpt_ready);
+    list_del_init(&svsk->sk_xprt.xpt_ready);
 
     dprintk("svc: socket %p dequeued, inuse=%d\n",
         svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
@@ -572,7 +572,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
     if (!serv)
         return 0;
     spin_lock_bh(&serv->sv_lock);
-    list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
+    list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
         int onelen = one_sock_name(buf+len, svsk);
         if (toclose && strcmp(toclose, buf+len) == 0)
             closesk = svsk;
@@ -584,7 +584,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
         /* Should unregister with portmap, but you cannot
          * unregister just one protocol...
          */
-        svc_close_socket(closesk);
+        svc_close_xprt(&closesk->sk_xprt);
     else if (toclose)
         return -ENOENT;
     return len;
@@ -1427,12 +1427,12 @@ svc_sock_update_bufs(struct svc_serv *serv)
     spin_lock_bh(&serv->sv_lock);
     list_for_each(le, &serv->sv_permsocks) {
         struct svc_sock *svsk =
-            list_entry(le, struct svc_sock, sk_list);
+            list_entry(le, struct svc_sock, sk_xprt.xpt_list);
         set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
     }
     list_for_each(le, &serv->sv_tempsocks) {
         struct svc_sock *svsk =
-            list_entry(le, struct svc_sock, sk_list);
+            list_entry(le, struct svc_sock, sk_xprt.xpt_list);
         set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
     }
     spin_unlock_bh(&serv->sv_lock);
@@ -1469,7 +1469,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
          */
         svsk = list_entry(serv->sv_tempsocks.prev,
                   struct svc_sock,
-                  sk_list);
+                  sk_xprt.xpt_list);
         set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
         svc_xprt_get(&svsk->sk_xprt);
     }
@@ -1576,7 +1576,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
     len = 0;
     if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
         dprintk("svc_recv: found XPT_CLOSE\n");
-        svc_delete_socket(svsk);
+        svc_delete_xprt(&svsk->sk_xprt);
     } else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
         struct svc_xprt *newxpt;
         newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
@@ -1685,7 +1685,7 @@ svc_age_temp_sockets(unsigned long closure)
     }
 
     list_for_each_safe(le, next, &serv->sv_tempsocks) {
-        svsk = list_entry(le, struct svc_sock, sk_list);
+        svsk = list_entry(le, struct svc_sock, sk_xprt.xpt_list);
 
         if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
             continue;
@@ -1701,9 +1701,9 @@ svc_age_temp_sockets(unsigned long closure)
     while (!list_empty(&to_be_aged)) {
         le = to_be_aged.next;
-        /* fiddling the sk_list node is safe 'cos we're XPT_DETACHED */
+        /* fiddling the sk_xprt.xpt_list node is safe 'cos we're XPT_DETACHED */
         list_del_init(le);
-        svsk = list_entry(le, struct svc_sock, sk_list);
+        svsk = list_entry(le, struct svc_sock, sk_xprt.xpt_list);
 
         dprintk("queuing svsk %p for closing, %lu seconds old\n",
             svsk, get_seconds() - svsk->sk_lastrecv);
@@ -1757,7 +1757,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
     svsk->sk_lastrecv = get_seconds();
     spin_lock_init(&svsk->sk_lock);
     INIT_LIST_HEAD(&svsk->sk_deferred);
-    INIT_LIST_HEAD(&svsk->sk_ready);
     mutex_init(&svsk->sk_mutex);
 
     /* Initialize the socket */
@@ -1769,7 +1768,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
     spin_lock_bh(&serv->sv_lock);
     if (is_temporary) {
         set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
-        list_add(&svsk->sk_list, &serv->sv_tempsocks);
+        list_add(&svsk->sk_xprt.xpt_list, &serv->sv_tempsocks);
         serv->sv_tmpcnt++;
         if (serv->sv_temptimer.function == NULL) {
             /* setup timer to age temp sockets */
@@ -1780,7 +1779,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
         }
     } else {
         clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
-        list_add(&svsk->sk_list, &serv->sv_permsocks);
+        list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks);
     }
     spin_unlock_bh(&serv->sv_lock);
 
@@ -1912,66 +1911,63 @@ static void svc_sock_free(struct svc_xprt *xprt)
 }
 
 /*
- * Remove a dead socket
+ * Remove a dead transport
  */
-static void
-svc_delete_socket(struct svc_sock *svsk)
+static void svc_delete_xprt(struct svc_xprt *xprt)
 {
-    struct svc_serv *serv;
-    struct sock *sk;
-
-    dprintk("svc: svc_delete_socket(%p)\n", svsk);
-
-    serv = svsk->sk_xprt.xpt_server;
-    sk = svsk->sk_sk;
+    struct svc_serv *serv = xprt->xpt_server;
 
-    svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);
+    dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+    xprt->xpt_ops->xpo_detach(xprt);
 
     spin_lock_bh(&serv->sv_lock);
-
-    if (!test_and_set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags))
-        list_del_init(&svsk->sk_list);
+    if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+        list_del_init(&xprt->xpt_list);
 
     /*
-     * We used to delete the svc_sock from whichever list
-     * it's sk_ready node was on, but we don't actually
+     * We used to delete the transport from whichever list
+     * it's sk_xprt.xpt_ready node was on, but we don't actually
      * need to. This is because the only time we're called
     * while still attached to a queue, the queue itself
     * is about to be destroyed (in svc_destroy).
     */
-    if (!test_and_set_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
-        BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
-        if (test_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags))
+    if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
+        BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
+        if (test_bit(XPT_TEMP, &xprt->xpt_flags))
             serv->sv_tmpcnt--;
-        svc_xprt_put(&svsk->sk_xprt);
+        svc_xprt_put(xprt);
     }
-
     spin_unlock_bh(&serv->sv_lock);
 }
 
-static void svc_close_socket(struct svc_sock *svsk)
+static void svc_close_xprt(struct svc_xprt *xprt)
 {
-    set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
-    if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
+    set_bit(XPT_CLOSE, &xprt->xpt_flags);
+    if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
         /* someone else will have to effect the close */
        return;
 
-    svc_xprt_get(&svsk->sk_xprt);
-    svc_delete_socket(svsk);
-    clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
-    svc_xprt_put(&svsk->sk_xprt);
+    svc_xprt_get(xprt);
+    svc_delete_xprt(xprt);
+    clear_bit(XPT_BUSY, &xprt->xpt_flags);
+    svc_xprt_put(xprt);
 }
 
-void svc_force_close_socket(struct svc_sock *svsk)
+void svc_close_all(struct list_head *xprt_list)
 {
-    set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
-    if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
-        /* Waiting to be processed, but no threads left,
-         * So just remove it from the waiting list
-         */
-        list_del_init(&svsk->sk_ready);
-        clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
+    struct svc_xprt *xprt;
+    struct svc_xprt *tmp;
+
+    list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+        set_bit(XPT_CLOSE, &xprt->xpt_flags);
+        if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+            /* Waiting to be processed, but no threads left,
+             * So just remove it from the waiting list
+             */
+            list_del_init(&xprt->xpt_ready);
+            clear_bit(XPT_BUSY, &xprt->xpt_flags);
+        }
+        svc_close_xprt(xprt);
     }
-    svc_close_socket(svsk);
 }
 
 /*