Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c   227
 1 file changed, 223 insertions(+), 4 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ec85681a7dd..f5b46f6a970 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,9 +73,25 @@
static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+
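+/*
+ * Release an spd entry that was never handed off to the pipe: the
+ * skb it was carved from sits in ->private, pinned there by the
+ * skb_get() in spd_fill_page_1(), so just drop that reference.
+ */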
+static void sock_spd_buf_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+ struct sk_buff *skb = (struct sk_buff *)spd->partial[i].private;
+
+ kfree_skb(skb);
+}
+
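+/*
+ * Normal pipe-buffer release: ->private carries the same skb
+ * reference, dropped once the pipe consumer is done with the data.
+ */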
static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
+ struct sk_buff *skb = (struct sk_buff *) buf->private;
+
+ kfree_skb(skb);
+}
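+
+/*
+ * Page-based variant of the release op, dropping the page reference
+ * that the old (now #if 0'd) code took with get_page().
+ */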
+static void sock_pipe_buf_release_1(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
put_page(buf->page);
}
@@ -1374,7 +1390,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
put_page(spd->pages[i]);
}
-
+#if 0
static inline struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
struct sk_buff *skb, struct sock *sk)
@@ -1488,7 +1504,6 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
return 0;
}
-
/*
* Map linear and fragment data from the skb to spd. It reports failure if the
* pipe is full or if we already spliced the requested length.
@@ -1521,7 +1536,6 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
return 0;
}
-
/*
* Map data from the skb to a pipe. Should handle both the linear part,
* the fragments, and the frag list. It does NOT handle frag lists within
@@ -1539,7 +1553,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
.partial = partial,
.flags = flags,
.ops = &sock_pipe_buf_ops,
- .spd_release = sock_spd_release,
+ .spd_release = sock_spd_buf_release,
};
struct sk_buff *frag_iter;
struct sock *sk = skb->sk;
@@ -1584,7 +1598,212 @@ done:
return 0;
}
+#else
+/*
+ * Fill page/offset/length into spd, if it can hold more pages. Note
+ * that no page reference is taken: the skb is pinned via skb_get()
+ * and stashed in ->private for the release ops to kfree_skb() later.
+ */
+static inline int spd_fill_page_1(struct splice_pipe_desc *spd, struct page *page,
+ unsigned int len, unsigned int offset,
+ struct sk_buff *skb)
+{
+ if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+ return 1;
+
+ /* no get_page(): the skb_get() below keeps the page alive */
+ spd->pages[spd->nr_pages] = page;
+ spd->partial[spd->nr_pages].len = len;
+ spd->partial[spd->nr_pages].offset = offset;
+ spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
+ spd->nr_pages++;
+ return 0;
+}
+
+/*
+ * Map linear and fragment data from the skb to spd. It reports failure
+ * if the pipe is full or if we already spliced the requested length.
+ */
+static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
+ unsigned int *total_len,
+ struct splice_pipe_desc *spd,
+ struct sock *sk)
+{
+ unsigned int nr_pages = spd->nr_pages;
+ unsigned int poff, plen, len, toff, tlen;
+ int headlen, seg;
+
+ toff = *offset;
+ tlen = *total_len;
+ if (!tlen)
+ goto err;
+
+ /*
+ * if the offset is greater than the linear part, go directly to
+ * the fragments.
+ */
+ headlen = skb_headlen(skb);
+ if (toff >= headlen) {
+ toff -= headlen;
+ goto map_frag;
+ }
+
+ /*
+ * first map the linear region into the pages/partial map, skipping
+ * any potential initial offset. The linear data need not be page
+ * aligned, so walk it page by page via virt_to_page().
+ */
+ len = 0;
+ while (len < headlen) {
+ void *p = skb->data + len;
+
+ poff = (unsigned long) p & (PAGE_SIZE - 1);
+ plen = min_t(unsigned int, headlen - len, PAGE_SIZE - poff);
+ len += plen;
+
+ if (toff) {
+ if (plen <= toff) {
+ toff -= plen;
+ continue;
+ }
+ plen -= toff;
+ poff += toff;
+ toff = 0;
+ }
+
+ plen = min(plen, tlen);
+ if (!plen)
+ break;
+
+ /*
+ * just jump directly to update and return, no point
+ * in going over fragments when the output is full.
+ */
+ if (spd_fill_page_1(spd, virt_to_page(p), plen, poff, skb))
+ goto done;
+
+ tlen -= plen;
+ }
+
+ /*
+ * then map the fragments; their pages are likewise kept alive by the
+ * skb reference rather than by individual page references.
+ */
+map_frag:
+ for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
+ const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+
+ plen = f->size;
+ poff = f->page_offset;
+
+ if (toff) {
+ if (plen <= toff) {
+ toff -= plen;
+ continue;
+ }
+ plen -= toff;
+ poff += toff;
+ toff = 0;
+ }
+
+ plen = min(plen, tlen);
+ if (!plen)
+ break;
+
+ if (spd_fill_page_1(spd, f->page, plen, poff, skb))
+ break;
+
+ tlen -= plen;
+ }
+
+done:
+ if (spd->nr_pages - nr_pages) {
+ *offset = 0;
+ *total_len = tlen;
+ return 0;
+ }
+err:
+ return 1;
+}
+
+/*
+ * Map data from the skb to a pipe. Should handle both the linear part,
+ * the fragments, and the frag list. It does NOT handle frag lists within
+ * the frag list, if such a thing exists. We'd probably need to recurse to
+ * handle that cleanly.
+ */
+int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+ struct pipe_inode_info *pipe, unsigned int tlen,
+ unsigned int flags)
+{
+ struct partial_page partial[PIPE_BUFFERS];
+ struct page *pages[PIPE_BUFFERS];
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
+ .flags = flags,
+ .ops = &sock_pipe_buf_ops,
+ .spd_release = sock_spd_buf_release,
+ };
+ struct sock *sk = __skb->sk;
+#if 1
+ struct sk_buff *skb;
+ /*
+ * I'd love to avoid the clone here, but tcp_read_sock()
+ * ignores reference counts and unconditionally kills the sk_buff
+ * on return from the actor.
+ */
+ skb = skb_clone(__skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
+#endif
+ /*
+ * __skb_splice_bits() only fails if the output has no room left,
+ * so no point in going over the frag_list for the error case.
+ */
+ if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+ goto done;
+ else if (!tlen)
+ goto done;
+
+ /*
+ * now see if we have a frag_list to map
+ */
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list && tlen; list = list->next) {
+ if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
+ break;
+ }
+ }
+
+done:
+#if 1
+ /*
+ * drop our reference to the clone, the pipe consumption will
+ * drop the rest.
+ */
+ kfree_skb(skb);
+#endif
+ if (spd.nr_pages) {
+ int ret;
+
+ /*
+ * Drop the socket lock, otherwise we have reverse
+ * locking dependencies between sk_lock and i_mutex
+ * here as compared to sendfile(). We enter here
+ * with the socket lock held, and splice_to_pipe() will
+ * grab the pipe inode lock. For sendfile() emulation,
+ * we call into ->sendpage() with the i_mutex lock held
+ * and networking will grab the socket lock.
+ */
+ release_sock(sk);
+ ret = splice_to_pipe(pipe, &spd);
+ lock_sock(sk);
+ return ret;
+ }
+
+ return 0;
+}
+
+#endif
/**
* skb_store_bits - store bits from kernel buffer to skb
* @skb: destination buffer