From ff4cc3ac93e1d0369928fd60ec1fe82417afc576 Mon Sep 17 00:00:00 2001
From: Mike Kershaw <dragorn@kismetwireless.net>
Date: Thu, 1 Sep 2005 17:40:05 -0700
Subject: [TUNTAP]: Allow setting the linktype of the tap device from userspace

Currently tun/tap only supports the EN10MB ARP type.  For use with
wireless and other networking types it should be possible to set the
ARP type via an ioctl.

Patch v2: Included a check that the tap interface is down before changing
the link type out from underneath it.
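
For illustration, a minimal userspace sketch (not part of the patch; the
device name "tap0" is arbitrary, ARPHRD_IEEE80211 comes from
<linux/if_arp.h>, and the new type is passed directly as the ioctl
argument, as with TUNSETOWNER):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>
    #include <linux/if_arp.h>

    int main(void)
    {
            struct ifreq ifr;
            int fd = open("/dev/net/tun", O_RDWR);   /* needs CAP_NET_ADMIN */

            if (fd < 0)
                    return 1;

            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
            strcpy(ifr.ifr_name, "tap0");
            if (ioctl(fd, TUNSETIFF, &ifr) < 0)
                    return 1;

            /* The interface is still down at this point; switch its
             * ARP/link type from the default EN10MB to 802.11. */
            if (ioctl(fd, TUNSETLINK, ARPHRD_IEEE80211) < 0)
                    perror("TUNSETLINK");

            pause();        /* keep the fd open so the interface persists */
            return 0;
    }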

Signed-off-by: Mike Kershaw <dragorn@kismetwireless.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/if_tun.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 096a85a58ae..88aef7b86ef 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -77,6 +77,7 @@ struct tun_struct {
 #define TUNSETIFF     _IOW('T', 202, int) 
 #define TUNSETPERSIST _IOW('T', 203, int) 
 #define TUNSETOWNER   _IOW('T', 204, int)
+#define TUNSETLINK    _IOW('T', 205, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
-- 
cgit v1.2.3-18-g5258


From 732db659b83579b922c18dee9123e1529b5fb5d2 Mon Sep 17 00:00:00 2001
From: Adrian Bunk <bunk@stusta.de>
Date: Thu, 1 Sep 2005 17:40:26 -0700
Subject: [IPVS]: "extern inline" -> "static inline"

"extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/ip_vs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7a3c43711a1..e426641c519 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -958,7 +958,7 @@ static __inline__ int ip_vs_todrop(void)
  */
 #define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)
 
-extern __inline__ char ip_vs_fwd_tag(struct ip_vs_conn *cp)
+static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
 {
 	char fwd;
 
-- 
cgit v1.2.3-18-g5258


From 64baf3cfea974d2b9e671ccfdbc03e030ea5ebc6 Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Thu, 1 Sep 2005 17:43:05 -0700
Subject: [CRYPTO]: Added CRYPTO_TFM_REQ_MAY_SLEEP flag

The crypto layer currently uses in_atomic() to determine whether it is
allowed to sleep.  This is incorrect since spin locks don't always cause
in_atomic() to return true.

Instead, this patch returns to an earlier idea: a per-tfm flag that
determines whether sleeping is allowed.  Unlike the earlier version,
the default is to not allow sleeping, which ensures that no existing
code can break.

As usual, this flag may either be set through crypto_alloc_tfm(), or
just before a specific crypto operation.
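
For example (a sketch only, not part of the patch; "aes" is a
placeholder algorithm name, error handling is omitted, and the calls use
the crypto_tfm API of this era):

    #include <linux/crypto.h>

    struct crypto_tfm *tfm;

    /* Process context, no locks held, so the cipher may sleep for
     * memory allocations.  Set the flag at allocation time ... */
    tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);

    /* ... or just before a specific operation on an existing tfm: */
    tfm->crt_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;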

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/crypto.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 5e2bcc636a0..3c89df6e776 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -45,6 +45,7 @@
 #define CRYPTO_TFM_MODE_CTR		0x00000008
 
 #define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
+#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
 #define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
 #define CRYPTO_TFM_RES_BAD_KEY_LEN   	0x00200000
 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 	0x00400000
-- 
cgit v1.2.3-18-g5258


From d80d99d643090c3cf2b1f9fb3fadd1256f7e384f Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Thu, 1 Sep 2005 17:48:23 -0700
Subject: [NET]: Add sk_stream_wmem_schedule

This patch introduces sk_stream_wmem_schedule as a shorthand for
the sk_forward_alloc check on egress.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/sock.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25cbd1..e51e626e9af 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 		sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 	skb = alloc_skb_fclone(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, hdr_len);
 			return skb;
 		}
@@ -1227,8 +1232,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
 		page = alloc_pages(sk->sk_allocation, 0);
 	else {
 		sk->sk_prot->enter_memory_pressure();
-- 
cgit v1.2.3-18-g5258


From ef015786152adaff5a6a8bf0c8ea2f70cee8059d Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Thu, 1 Sep 2005 17:48:59 -0700
Subject: [TCP]: Fix sk_forward_alloc underflow in tcp_sendmsg

I've finally found a potential cause of the sk_forward_alloc underflows
that people have been reporting sporadically.

When tcp_sendmsg tacks extra data onto an existing TCP_PAGE, we don't
check sk_forward_alloc even though a large amount of time may have
elapsed since we allocated the page.  In the meantime someone could have
come along, liberated packets, and reclaimed sk_forward_alloc memory.

This patch makes tcp_sendmsg check sk_forward_alloc every time, as we
do in do_tcp_sendpages.
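
The tcp_sendmsg side of the change lives in net/ipv4/tcp.c and is not
shown in this export (limited to 'include'); the pattern it adds is
roughly the following (a sketch, not the verbatim hunk; "copy" stands
for the number of bytes about to be appended to the cached page):

    /* Re-check write-side memory accounting before appending to the
     * page we allocated earlier; it may have been reclaimed since. */
    if (!sk_stream_wmem_schedule(sk, copy))
            goto wait_for_memory;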

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/sock.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/net/sock.h b/include/net/sock.h
index e51e626e9af..cf628261da5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1232,9 +1232,8 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
-		page = alloc_pages(sk->sk_allocation, 0);
-	else {
+	page = alloc_pages(sk->sk_allocation, 0);
+	if (!page) {
 		sk->sk_prot->enter_memory_pressure();
 		sk_stream_moderate_sndbuf(sk);
 	}
-- 
cgit v1.2.3-18-g5258


From 6475be16fd9b3c6746ca4d18959246b13c669ea8 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Thu, 1 Sep 2005 22:47:01 -0700
Subject: [TCP]: Keep TSO enabled even during loss events.

All we need to do is resegment the queue so that
we record SACK information accurately.  The edges
of the SACK blocks guide our resegmenting decisions.
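
The queue-resegmenting code lives in net/ipv4/ and is not shown in this
export (limited to 'include'); it uses the newly exported tcp_fragment()
to split a TSO skb at a SACK block edge rather than disabling TSO.
Roughly (a sketch, not the verbatim hunk; pkt_len and mss_now stand for
values the caller has already computed):

    /* A SACK edge falls inside this TSO skb: split it at the edge so
     * the scoreboard can be marked precisely and TSO stays enabled. */
    if (tcp_fragment(sk, skb, pkt_len, mss_now))
            break;  /* allocation failed, stop resegmenting */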

With help from Herbert Xu.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/tcp.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/net/tcp.h b/include/net/tcp.h
index d6bcf1317a6..97af77c4d09 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -454,6 +454,7 @@ extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 
 extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
-- 
cgit v1.2.3-18-g5258