about summary refs log tree commit diff
path: root/net/ipv4
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-12-04 07:05:17 +0000
committerDavid S. Miller <davem@davemloft.net>2011-12-04 13:20:40 -0500
commit761965eab38d2cbc59c36e355c59609e3a04705a (patch)
tree53bc45ee752f8d31323962e5af2e0451376c3b35 /net/ipv4
parent117632e64d2a5f464e491fe221d7169a3814a77b (diff)
tcp: tcp_sendmsg() page recycling
If our TCP_PAGE(sk) is not shared (page_count() == 1), we can set page offset to 0. This permits better filling of the pages on small to medium tcp writes. "tbench 16" results on my dev server (2x4x2 machine) : Before : 3072 MB/s After : 3146 MB/s (2.4 % gain) Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/tcp.c7
1 file changed, 6 insertions, 1 deletion
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 45156be3abf..a09fe253b91 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1009,7 +1009,12 @@ new_segment:
int merge = 0;
int i = skb_shinfo(skb)->nr_frags;
struct page *page = TCP_PAGE(sk);
- int off = TCP_OFF(sk);
+ int off;
+
+ if (page && page_count(page) == 1)
+ TCP_OFF(sk) = 0;
+
+ off = TCP_OFF(sk);
if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {