author     Eric Dumazet <eric.dumazet@gmail.com>            2012-04-24 23:01:22 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-04-27 10:17:06 -0700
commit     0df29c4a2857ed43ff23689423144aaf4f4101bc (patch)
tree       e60e1c81cd20818bf597201f7c51585592ce13eb
parent     1cf9d571c6e48f15608203fc76fa29c617459e0c (diff)
tcp: avoid order-1 allocations on wifi and tx path
[ This combines upstream commit
a21d45726acacc963d8baddf74607d9b74e2b723 and the follow-on bug fix
commit a21d45726acacc963d8baddf74607d9b74e2b723 ]
Marc Merlin reported many order-1 allocation failures in the TX path on his
wireless setup, which make no sense with an MTU=1500 network and non-SG
capable hardware.
After investigation, it turns out TCP uses sk_stream_alloc_skb() and, by
convention, relies on skb_tailroom(skb) to know how many bytes of data
payload can be put in the skb (for non-SG capable devices).
Note: these skbs use kmalloc-4096, since MTU=1500 + MAX_HEADER +
sizeof(struct skb_shared_info) is above 2048.
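A rough, stand-alone sketch of that sizing arithmetic follows; the two
ASSUMED_* constants are illustrative guesses, not the kernel's real
(config-dependent) values.

/* Rough sketch of why a 1500-byte TCP skb lands in kmalloc-4096 on
 * non-SG hardware.  The ASSUMED_* constants are guesses for illustration. */
#include <stdio.h>

#define SMP_CACHE_BYTES		64
#define SKB_DATA_ALIGN(x)	(((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define ASSUMED_MAX_HEADER	288	/* room reserved for protocol headers */
#define ASSUMED_SHINFO_SIZE	320	/* stand-in for sizeof(struct skb_shared_info) */

int main(void)
{
	unsigned int payload = 1500;	/* one MTU of data, non-SG device */
	unsigned int linear  = SKB_DATA_ALIGN(payload + ASSUMED_MAX_HEADER);
	unsigned int total   = linear + ASSUMED_SHINFO_SIZE;
	unsigned int bucket  = 32;

	/* for sizes in this range, kmalloc rounds up to the next
	 * power-of-two cache, so anything above 2048 lands in kmalloc-4096 */
	while (bucket < total)
		bucket *= 2;

	printf("linear=%u total=%u -> kmalloc-%u\n", linear, total, bucket);
	printf("spare room left in that buffer: about %u bytes\n",
	       bucket - ASSUMED_SHINFO_SIZE - (payload + ASSUMED_MAX_HEADER));
	return 0;
}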
Later, the mac80211 layer needs to add some bytes at the tail of the skb
(IEEE80211_ENCRYPT_TAILROOM = 18 bytes); since no tailroom is left, it has
to call pskb_expand_head() and request an order-1 allocation.
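The check that forces this reallocation looks roughly like the sketch
below (simplified, not the actual mac80211 code; wifi_make_room_for_tail
is a made-up helper name).

#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Simplified sketch of the kind of check the wifi stack does before
 * adding encryption trailers: with zero tailroom left, the
 * pskb_expand_head() call is what ends up asking for an order-1 buffer. */
static int wifi_make_room_for_tail(struct sk_buff *skb, int tail_need)
{
	if (skb_tailroom(skb) >= tail_need)
		return 0;		/* enough room, no reallocation */

	/* Copy head + data into a larger buffer with tail_need extra bytes */
	return pskb_expand_head(skb, 0, tail_need - skb_tailroom(skb),
				GFP_ATOMIC);
}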
This patch changes sk_stream_alloc_skb() so that only
sk->sk_prot->max_header bytes of headroom are reserved, and uses a new
skb field, avail_size, to hold the data payload limit.
This way, order-0 allocations done by the TCP stack leave more than 2 KB
of tailroom, and no further allocation is performed in the mac80211 layer
(or any other layer needing some tailroom).
avail_size is unioned with mark/dropcount, since mark is only set later,
in the IP stack, for output packets. Therefore, the skb size is unchanged.
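A tiny userspace sketch (hypothetical structs, not the real sk_buff
layout) of why overlaying a third member on the union costs nothing:

/* Hypothetical structs (not the real sk_buff) showing that a new union
 * member shares storage with the existing ones, so the struct does not grow. */
#include <stdint.h>
#include <stdio.h>

struct before { union { uint32_t mark; uint32_t dropcount; }; };
struct after  { union { uint32_t mark; uint32_t dropcount; uint32_t avail_size; }; };

_Static_assert(sizeof(struct before) == sizeof(struct after),
	       "adding avail_size to the union must not grow the struct");

int main(void)
{
	struct after skb;

	/* TCP owns the slot while the skb sits on the write queue; the IP
	 * layer only writes skb->mark later, on the output path. */
	skb.avail_size = 1460;
	printf("avail_size=%u aliases mark=%u, size=%zu bytes\n",
	       (unsigned)skb.avail_size, (unsigned)skb.mark, sizeof(skb));
	return 0;
}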
Reported-by: Marc MERLIN <marc@merlins.org>
Tested-by: Marc MERLIN <marc@merlins.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--   include/linux/skbuff.h   13
-rw-r--r--   net/ipv4/tcp.c            8
-rw-r--r--   net/ipv4/tcp_output.c     3
3 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ae86adee374..42854cee6fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -468,6 +468,7 @@ struct sk_buff {
 	union {
 		__u32		mark;
 		__u32		dropcount;
+		__u32		avail_size;
 	};
 
 	__u16			vlan_tci;
@@ -1338,6 +1339,18 @@ static inline int skb_tailroom(const struct sk_buff *skb)
 }
 
 /**
+ * skb_availroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ * allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+}
+
+/**
  * skb_reserve - adjust headroom
  * @skb: buffer to alter
  * @len: bytes to move
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86014bbd3e3..8c850211916 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -699,11 +699,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb_reserve(skb, skb_tailroom(skb) - size);
+			skb->avail_size = size;
 			return skb;
 		}
 		__kfree_skb(skb);
@@ -993,10 +994,9 @@ new_segment:
 				copy = seglen;
 
 			/* Where to copy to? */
-			if (skb_tailroom(skb) > 0) {
+			if (skb_availroom(skb) > 0) {
 				/* We have some space in skb head. Superb! */
-				if (copy > skb_tailroom(skb))
-					copy = skb_tailroom(skb);
+				copy = min_t(int, copy, skb_availroom(skb));
 				err = skb_add_data_nocache(sk, skb, from, copy);
 				if (err)
 					goto do_fault;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4ff3b6dc74f..e2c6722f717 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1096,6 +1096,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 	eat = min_t(int, len, skb_headlen(skb));
 	if (eat) {
 		__skb_pull(skb, eat);
+		skb->avail_size -= eat;
 		len -= eat;
 		if (!len)
 			return;
@@ -2060,7 +2061,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 		/* Punt if not enough space exists in the first SKB for
 		 * the data in the second
 		 */
-		if (skb->len > skb_tailroom(to))
+		if (skb->len > skb_availroom(to))
 			break;
 
 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))