patch-2.1.80 linux/net/sunrpc/svcsock.c

diff -u --recursive --new-file v2.1.79/linux/net/sunrpc/svcsock.c linux/net/sunrpc/svcsock.c
@@ -131,10 +131,10 @@
 {
 	struct svc_sock	*svsk;
 
-	disable_bh(NET_BH);
+	start_bh_atomic();
 	if ((svsk = serv->sv_sockets) != NULL)
 		rpc_remove_list(&serv->sv_sockets, svsk);
-	enable_bh(NET_BH);
+	end_bh_atomic();
 
 	if (svsk) {
 		dprintk("svc: socket %p dequeued\n", svsk->sk_sk);
@@ -151,7 +151,7 @@
 static inline void
 svc_sock_received(struct svc_sock *svsk, int count)
 {
-	disable_bh(NET_BH);
+	start_bh_atomic();
 	if ((svsk->sk_data -= count) < 0) {
 		printk(KERN_NOTICE "svc: sk_data negative!\n");
 		svsk->sk_data = 0;
@@ -163,7 +163,7 @@
 						svsk->sk_sk);
 		svc_sock_enqueue(svsk);
 	}
-	enable_bh(NET_BH);
+	end_bh_atomic();
 }
 
 /*
@@ -172,7 +172,7 @@
 static inline void
 svc_sock_accepted(struct svc_sock *svsk)
 {
-	disable_bh(NET_BH);
+	start_bh_atomic();
         svsk->sk_busy = 0;
         svsk->sk_conn--;
         if (svsk->sk_conn || svsk->sk_data || svsk->sk_close) {
@@ -180,7 +180,7 @@
 						svsk->sk_sk);
                 svc_sock_enqueue(svsk);
         }
-	enable_bh(NET_BH);
+	end_bh_atomic();
 }
 
 /*
@@ -739,9 +739,9 @@
 	if (signalled())
 		return -EINTR;
 
-	disable_bh(NET_BH);
+	start_bh_atomic();
 	if ((svsk = svc_sock_dequeue(serv)) != NULL) {
-		enable_bh(NET_BH);
+		end_bh_atomic();
 		rqstp->rq_sock = svsk;
 		svsk->sk_inuse++; /* N.B. where is this decremented? */
 	} else {
@@ -756,7 +756,7 @@
 		 */
 		current->state = TASK_INTERRUPTIBLE;
 		add_wait_queue(&rqstp->rq_wait, &wait);
-		enable_bh(NET_BH);
+		end_bh_atomic();
 		schedule();
 
 		if (!(svsk = rqstp->rq_sock)) {
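
The change running through this file replaces the NET_BH-specific
disable_bh(NET_BH)/enable_bh(NET_BH) pairs with start_bh_atomic()/
end_bh_atomic(), so the critical sections that touch the svc_sock queues
are protected against all bottom-half processing rather than only the
network bottom half. The fragment below is a minimal sketch of that
locking pattern in isolation, assuming the 2.1-era <asm/softirq.h>
interface; the list type and helper name (my_entry, my_remove_head) are
hypothetical, and only the start_bh_atomic()/end_bh_atomic() calls are
taken from the kernel itself.

	#include <asm/softirq.h>

	struct my_entry {
		struct my_entry *next;
	};

	/* Head of a list that is also modified from a bottom half. */
	static struct my_entry *my_list_head;

	static struct my_entry *
	my_remove_head(void)
	{
		struct my_entry *ent;

		start_bh_atomic();	/* keep bottom halves out of the critical section */
		if ((ent = my_list_head) != NULL)
			my_list_head = ent->next;
		end_bh_atomic();	/* allow bottom-half processing again */

		return ent;
	}

The hunks above follow the same shape: take the bottom-half exclusion
around each access to the shared lists, and release it again before the
schedule() call in the final hunk.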
