author	Herbert Xu <herbert@gondor.apana.org.au>	2009-01-30 14:12:06 -0800
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-02-17 09:46:24 -0800
commit	ce8b57d099ca4a831098f07e1308dbc23cb13b9f (patch)
tree	8fd1d6f6dc50e9ec5c597157a1999c56ba0fb6c6 /net/packet
parent	42f1115b1be1f528a082b97b127e909d6f640079 (diff)
packet: Avoid lock_sock in mmap handler
[ Upstream commit 905db44087855e3c1709f538ecdc22fd149cadd8 ]

As the mmap handler gets called under mmap_sem, and we may grab mmap_sem
elsewhere under the socket lock to access user data, we should avoid
grabbing the socket lock in the mmap handler.

Since the only thing we care about in the mmap handler is for pg_vec* to
be invariant, i.e., to exclude packet_set_ring, we can achieve this by
simply using a new mutex.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Martin MOKREJŠ <mmokrejs@ribosome.natur.cuni.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
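[Editor's illustration] A minimal userspace sketch (not kernel code) of the lock-order inversion the patch avoids: pthread mutexes stand in for mmap_sem, the socket lock, and the new pg_vec_lock. Before the patch the mmap path took mmap_sem then the socket lock, while the send/receive path could hold the socket lock and then need mmap_sem to fault in user data, a classic AB-BA inversion; giving the ring buffer its own mutex removes the socket lock from the mmap path entirely.

/* Hypothetical sketch, userspace only: locks named after their kernel
 * counterparts (mmap_sem, sk_lock, pg_vec_lock). Build with -pthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_sem    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sk_lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pg_vec_lock = PTHREAD_MUTEX_INITIALIZER;	/* the fix */

/* mmap handler: always entered under mmap_sem.  After the patch it takes
 * only pg_vec_lock; before the patch it took sk_lock here, creating the
 * mmap_sem -> sk_lock ordering. */
static void *mmap_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_sem);
	pthread_mutex_lock(&pg_vec_lock);	/* was: lock_sock(sk) */
	/* ... map the pg_vec pages ... */
	pthread_mutex_unlock(&pg_vec_lock);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

/* send/receive path: holds the socket lock and may then fault in user
 * memory under mmap_sem, i.e. the sk_lock -> mmap_sem ordering. */
static void *sendmsg_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sk_lock);
	pthread_mutex_lock(&mmap_sem);		/* page fault on user buffer */
	/* ... copy user data ... */
	pthread_mutex_unlock(&mmap_sem);
	pthread_mutex_unlock(&sk_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, mmap_path, NULL);
	pthread_create(&b, NULL, sendmsg_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no AB-BA deadlock: the mmap path never takes the socket lock");
	return 0;
}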
Diffstat (limited to 'net/packet')
-rw-r--r--	net/packet/af_packet.c	9
1 file changed, 7 insertions, 2 deletions
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1789f6cf1cb..4b508782c44 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
 #endif
 	struct packet_type	prot_hook;
 	spinlock_t		bind_lock;
+	struct mutex		pg_vec_lock;
 	unsigned int		running:1,	/* prot_hook is attached*/
 				auxdata:1,
 				origdev:1;
@@ -1068,6 +1070,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
 	 */
 
 	spin_lock_init(&po->bind_lock);
+	mutex_init(&po->pg_vec_lock);
 	po->prot_hook.func = packet_rcv;
 
 	if (sock->type == SOCK_PACKET)
@@ -1863,6 +1866,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 	synchronize_net();
 
 	err = -EBUSY;
+	mutex_lock(&po->pg_vec_lock);
 	if (closing || atomic_read(&po->mapped) == 0) {
 		err = 0;
 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1884,6 +1888,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 		if (atomic_read(&po->mapped))
 			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
 	}
+	mutex_unlock(&po->pg_vec_lock);
 
 	spin_lock(&po->bind_lock);
 	if (was_running && !po->running) {
@@ -1916,7 +1921,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 
 	size = vma->vm_end - vma->vm_start;
 
-	lock_sock(sk);
+	mutex_lock(&po->pg_vec_lock);
 	if (po->pg_vec == NULL)
 		goto out;
 	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1939,7 +1944,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 	err = 0;
 
 out:
-	release_sock(sk);
+	mutex_unlock(&po->pg_vec_lock);
 	return err;
 }
 #endif