author | David Miller <davem@davemloft.net> | 2007-02-27 11:11:09 -0800
---|---|---
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2007-03-09 10:50:26 -0800
commit | 2e8c347d749bc09d21917b2f23fa84cb375e586b (patch) |
tree | fbfdd234e600ff68d4a423ee72aed53478f94bbc /net |
parent | ede6d26177a046ab7f14840e10cb2bbda6bc91df (diff) |
Fix TCP MD5 locking.
[TCP]: Fix MD5 signature pool locking.
The locking calls assumed that these code paths were only
invoked in software interrupt context, but that isn't true.
Therefore we need to use spin_{lock,unlock}_bh() throughout.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
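
For readers unfamiliar with the _bh lock variants, here is a minimal sketch (not from this patch; the lock and helper names are illustrative) of the pattern the fix converges on. A lock that is also taken from softirq context, such as the TCP receive path that computes MD5 signatures, must be acquired with spin_lock_bh() in process context; otherwise a softirq arriving on the same CPU while the lock is held will spin on it forever.

```c
#include <linux/spinlock.h>

/* Illustrative only -- these names do not appear in net/ipv4/tcp.c. */
static DEFINE_SPINLOCK(example_pool_lock);
static int example_pool_users;

/*
 * Process-context caller (e.g. a setsockopt() handler): spin_lock_bh()
 * disables bottom halves, so a softirq cannot run on this CPU and
 * deadlock trying to take example_pool_lock while we hold it.
 */
static void example_pool_get(void)
{
	spin_lock_bh(&example_pool_lock);
	example_pool_users++;
	spin_unlock_bh(&example_pool_lock);
}

/*
 * Softirq-context caller (e.g. packet processing): the _bh variants
 * are legal here as well, which is why the patch can use them
 * uniformly inside the pool helpers regardless of the caller.
 */
static void example_pool_put(void)
{
	spin_lock_bh(&example_pool_lock);
	example_pool_users--;
	spin_unlock_bh(&example_pool_lock);
}
```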
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv4/tcp.c | 24 |
1 file changed, 12 insertions, 12 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b67e0dd743b..ebe9d0d238f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool **pool = NULL;
 
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
 		pool = tcp_md5sig_pool;
 		tcp_md5sig_pool = NULL;
 	}
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	if (pool)
 		__tcp_free_md5sig_pool(pool);
 }
@@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
 	int alloc = 0;
 
 retry:
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
 		alloc = 1;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 		cpu_relax();
 		goto retry;
 	} else
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
 		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
-		spin_lock(&tcp_md5sig_pool_lock);
+		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			return NULL;
 		}
 		pool = tcp_md5sig_pool;
 		if (pool) {
 			/* oops, it has already been assigned. */
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			__tcp_free_md5sig_pool(p);
 		} else {
 			tcp_md5sig_pool = pool = p;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 		}
 	}
 	return pool;
@@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
 {
 	struct tcp_md5sig_pool **p;
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
 }
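
As a usage note, the sketch below shows how a process-context caller might drive the refcounted pool touched above. The caller names are hypothetical stand-ins for the real socket key-management paths, and the declarations are assumed to live in net/tcp.h; tcp_alloc_md5sig_pool() can sleep (see the comment in the hunk above), so it must not be called from softirq context.

```c
#include <linux/errno.h>
#include <net/tcp.h>	/* assumed home of the tcp_*_md5sig_pool() declarations */

/*
 * Hypothetical process-context caller: take a reference on the per-CPU
 * crypto pool when an MD5 key is configured on a socket.
 * tcp_alloc_md5sig_pool() bumps tcp_md5sig_users under the (now _bh)
 * lock and allocates the pool on first use.
 */
static int example_md5_key_add(void)
{
	if (!tcp_alloc_md5sig_pool())
		return -ENOMEM;
	return 0;
}

/*
 * Hypothetical counterpart: drop the reference when the key is removed.
 * tcp_free_md5sig_pool() releases the pool once tcp_md5sig_users
 * reaches zero.
 */
static void example_md5_key_del(void)
{
	tcp_free_md5sig_pool();
}
```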