Diffstat (limited to 'net/core')
-rw-r--r--  net/core/flow.c          12
-rw-r--r--  net/core/net-sysfs.c      7
-rw-r--r--  net/core/request_sock.c   7
-rw-r--r--  net/core/secure_seq.c     2
-rw-r--r--  net/core/sock.c           6
5 files changed, 24 insertions, 10 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79..e318c7e9804 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
 	put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+	flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+	schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c0..385aefe5364 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	if (count) {
 		int i;
 
-		if (count > 1<<30) {
+		if (count > INT_MAX)
+			return -EINVAL;
+		count = roundup_pow_of_two(count);
+		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		count = roundup_pow_of_two(count);
 		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
 		if (!table)
 			return -ENOMEM;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 182236b2510..9b570a6a33c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -26,10 +26,11 @@
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * The minimum value of it is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 025233de25f..925991ae6f5 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
 	/*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
 	 */
 	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5..b23f174ab84 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
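A note on the flow.c hunk: flow_cache_flush() brackets its work with get_online_cpus()/put_online_cpus() and so may sleep, which makes it unsafe to call directly from atomic context. The new flow_cache_flush_deferred() instead queues a work item, so the flush runs later in process context through the workqueue machinery. Below is a minimal userspace sketch of the same defer-to-worker pattern, using POSIX threads; every name in it is illustrative, not kernel API. Build with gcc -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool flush_pending;

/* Stand-in for the real flush, which may block for a long time. */
static void flow_cache_flush(void)
{
	printf("flushing flow cache\n");
}

/* Worker thread: plays the role of the work item handler
 * (flow_cache_flush_task in the patch). */
static void *flush_worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!flush_pending)
			pthread_cond_wait(&cond, &lock);
		flush_pending = false;
		pthread_mutex_unlock(&lock);
		flow_cache_flush();	/* runs in the worker, free to block */
	}
	return NULL;
}

/* Analogue of flow_cache_flush_deferred(): only flags the request and
 * wakes the worker, so the caller never performs the blocking flush. */
static void flow_cache_flush_deferred(void)
{
	pthread_mutex_lock(&lock);
	flush_pending = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, flush_worker, NULL);
	flow_cache_flush_deferred();
	sleep(1);	/* give the worker a chance to run */
	return 0;
}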
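A note on the net-sysfs.c hunk: the old code compared count against 1<<30 and only rounded up afterwards, so on 32-bit an accepted count could still make the RPS_DEV_FLOW_TABLE_SIZE() computation wrap. The new order first rejects count > INT_MAX (so roundup_pow_of_two() itself cannot overflow), rounds up, and then bounds the rounded count by (ULONG_MAX - header) / entry, so the final size arithmetic cannot wrap either. Here is a self-contained sketch of the same guard; the structures and their sizes are made-up stand-ins, not the kernel's.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins for rps_dev_flow{,_table}; the real layouts differ,
 * only the size arithmetic matters here. */
struct flow_entry {
	uint16_t cpu;
	uint32_t last_qtail;
};

struct flow_table {
	unsigned int mask;
	struct flow_entry flows[];
};

/* Round v up to the next power of two (safe because the INT_MAX check
 * below keeps v well under ULONG_MAX/2). */
static unsigned long roundup_pow_of_two(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Returns the allocation size in bytes, or 0 if count is rejected. */
static size_t table_size(unsigned long count)
{
	if (count > INT_MAX)
		return 0;
	count = roundup_pow_of_two(count);
	/* Same guard as the patch: a division, so the check itself
	 * cannot overflow no matter how large count is. */
	if (count > (ULONG_MAX - sizeof(struct flow_table))
			/ sizeof(struct flow_entry))
		return 0;
	return sizeof(struct flow_table) + count * sizeof(struct flow_entry);
}

int main(void)
{
	printf("%zu\n", table_size(100));	/* rounded up to 128 entries */
	printf("%zu\n", table_size(1UL << 30));	/* rejected on 32-bit,
						 * huge but valid on 64-bit */
	return 0;
}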

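A note on the secure_seq.c hunk: the #ifdef CONFIG_INET fence around seq_scale() presumably just avoids a defined-but-unused warning in configurations without the relevant callers; the function itself is unchanged. seq_scale() adds a real-time-clock component to the hashed sequence number, and since ktime_to_ns() yields nanoseconds, the >> 6 makes the offset advance once every 64 ns, wrapping a u32 after roughly 2^32 * 64 ns, about 275 seconds. A userspace approximation, assuming clock_gettime(CLOCK_REALTIME) as the stand-in for ktime_get_real():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Userspace approximation of seq_scale(): ns >> 6 ticks once every
 * 64 ns, so the added offset wraps a 32-bit value after about
 * 2^32 * 64 ns ~= 275 seconds. */
static uint32_t seq_scale(uint32_t seq)
{
	struct timespec ts;
	uint64_t ns;

	clock_gettime(CLOCK_REALTIME, &ts);
	ns = (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
	return seq + (uint32_t)(ns >> 6);	/* wraps mod 2^32, as in-kernel */
}

int main(void)
{
	uint32_t hashed = 0xdeadbeef;	/* stand-in for the hash-derived part */

	printf("isn = %u\n", seq_scale(hashed));
	return 0;
}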