Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/Kconfig | 13
-rw-r--r-- | net/ipv4/ah4.c | 884
-rw-r--r-- | net/ipv4/esp4.c | 861
-rw-r--r-- | net/ipv4/tcp.c | 9
-rw-r--r-- | net/ipv4/xfrm4_mode_transport.c | 19
5 files changed, 1648 insertions, 138 deletions
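
Both the ah4.c and esp4.c changes below follow the same pattern at state-setup time: in tunnel or transport mode they first request a combined offload transform whose name wraps the configured algorithm (for example "tunnel(esp(authenc(hmac(sha1),cbc(aes))))"), and they fall back to the plain software transform if no driver registers that name. What follows is a minimal userspace sketch of that selection logic only; pick_transform(), try_alloc() and the mode enum are illustrative stand-ins, not names from the patch.

#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME 64

enum mode { MODE_TRANSPORT, MODE_TUNNEL, MODE_OTHER };

/* Stand-in for crypto_alloc_aead(); returns 0 only if a driver offers the name. */
static int try_alloc(const char *name)
{
	printf("requesting transform \"%s\"\n", name);
	return -1;			/* pretend no offload driver is present */
}

/* Build "tunnel(esp(...))" / "transport(esp(...))" and fall back to the
 * base algorithm name when the wrapped transform cannot be allocated. */
static void pick_transform(enum mode mode, const char *base,
			   char out[CRYPTO_MAX_ALG_NAME])
{
	switch (mode) {
	case MODE_TUNNEL:
		snprintf(out, CRYPTO_MAX_ALG_NAME, "tunnel(esp(%s))", base);
		break;
	case MODE_TRANSPORT:
		snprintf(out, CRYPTO_MAX_ALG_NAME, "transport(esp(%s))", base);
		break;
	default:
		snprintf(out, CRYPTO_MAX_ALG_NAME, "%s", base);
		return;
	}
	if (try_alloc(out) < 0)		/* no hardware offload: software path */
		snprintf(out, CRYPTO_MAX_ALG_NAME, "%s", base);
}

int main(void)
{
	char name[CRYPTO_MAX_ALG_NAME];

	pick_transform(MODE_TUNNEL, "authenc(hmac(sha1),cbc(aes))", name);
	printf("using \"%s\"\n", name);
	return 0;
}

In the patch itself the failed allocation additionally clears the XFRM_ALGO_FLAGS_OFFLOAD_* bits, so ah_output()/ah_input() and esp_output()/esp_input() route packets through the existing software functions.
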
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 70491d9035e..83a930e0ac9 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -351,6 +351,19 @@ config INET_ESP If unsure, say Y. +config INET_ESP_NR_REQ_CACHE + int "Number of ESP request cache per connection" + range 0 256 + depends on INET_ESP + default "0" + ---help--- + Specify the number of ESP request crypto object cache per connection. + For ESP transport and tunnel modes, ESP request crypto cache object + can be cached instead returns to the memory pool after each packet + is being processed. This helps performance for slow processor with + memory cost. This value should be equal to the hardware offload + descriptor size. + config INET_IPCOMP tristate "IP: IPComp transformation" select INET_XFRM_TUNNEL diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 5c662703eb1..71150bff012 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -8,13 +8,152 @@ #include <linux/spinlock.h> #include <net/icmp.h> #include <net/protocol.h> +#include <crypto/authenc.h> +#include <linux/highmem.h> +#include <crypto/hash.h> +#define DEBUG_AH +#ifndef DEBUG_AH +# define AH_DUMP_PKT print_hex_dump +#else +# define AH_DUMP_PKT(arg...) +#endif + +/** + * @brief SKB private data for AH stored in skb cb field + * + * @tmp_req - temporary ahash/aead request + * @icv_trunc_len - AH ICV length for software AH + * @nh - Next header for hardware offload AH + * + */ +struct ah_skb_cb { + void *tmp_req; + u16 icv_trunc_len; + u8 nh; +}; + +#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) + +/** + * @brief AH work buffer (union) for software AH + * @iph - IP header access + * @buf - byte address access + * @note Used to save IP header and IP options + * + */ +union ah_tmp_iph { + struct iphdr iph; + char buf[60]; +}; + +#define AH_WORK_BUF_MAX_LEN sizeof(union ah_tmp_iph) + +/* + * Allocate an ahash request structure with extra space for structure + * ah_tmp_iph (scatch pad), ICV (input save ICV), working ICV + * (space for hash algorithm to store ICV), and SG. + * + */ +static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags) +{ + unsigned int len; + + len = AH_WORK_BUF_MAX_LEN; + len += MAX_AH_AUTH_LEN; + len += crypto_ahash_digestsize(ahash); + len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash); + len += ALIGN(len, __alignof__(struct scatterlist)); + len += sizeof(struct scatterlist) * nfrags; + + return kmalloc(len, GFP_ATOMIC); +} + +static inline void ah_free_tmp(void *tmp) +{ + kfree(tmp); +} + +static inline union ah_tmp_iph *ah_tmp_work_buf(void *tmp) +{ + return tmp; +} + +static inline u8 *ah_tmp_icv(union ah_tmp_iph *tmp) +{ + return (u8 *) (tmp + 1); +} + +static inline u8 *ah_tmp_work_icv(u8 *tmp) +{ + return tmp + MAX_AH_AUTH_LEN; +} + +static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash, + u8 *tmp) +{ + struct ahash_request *req = (struct ahash_request *) (tmp + + crypto_ahash_digestsize(ahash)); + ahash_request_set_tfm(req, ahash); + return req; +} + +static inline struct scatterlist *ah_tmp_sg(struct crypto_ahash *ahash, + struct ahash_request *req) +{ + return (void *) ALIGN((unsigned long) (req + 1) + + crypto_ahash_reqsize(ahash), + __alignof__(struct scatterlist)); +} + +/* + * Allocate an aead request structure with extra space for structure + * SG. 
+ * + */ +static void *ah_alloc_aead_tmp(struct crypto_aead *aead, int nfrags) +{ + unsigned int len; + + len = sizeof(struct aead_request) + crypto_aead_reqsize(aead); + len += ALIGN(len, __alignof__(struct scatterlist)); + len += sizeof(struct scatterlist) * nfrags; + + return kmalloc(len, GFP_ATOMIC); +} + +static inline void ah_free_aead_tmp(void *tmp) +{ + kfree(tmp); +} + +static inline struct aead_request *ah_tmp_aead_req(struct crypto_aead *aead, + void *tmp) +{ + struct aead_request *req = (struct aead_request *) tmp; + aead_request_set_tfm(req, aead); + return req; +} + +static inline struct scatterlist *ah_tmp_aead_sg(struct crypto_aead *aead, + struct aead_request *req) +{ + return (void *) ALIGN((unsigned long) (req + 1) + + crypto_aead_reqsize(aead), + __alignof__(struct scatterlist)); +} +static inline struct scatterlist *ah_tmp_aead_dsg(struct scatterlist *sg, + unsigned int nfrags) +{ + return (void *) ((unsigned long) sg + + sizeof(struct scatterlist) * nfrags); + +} /* Clear mutable options and find final destination to substitute * into IP header for icv calculation. Options are already checked * for validity, so paranoia is not required. */ - -static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) +int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) { unsigned char * optptr = (unsigned char*)(iph+1); int l = iph->ihl*4 - sizeof(struct iphdr); @@ -53,27 +192,133 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) } return 0; } +EXPORT_SYMBOL_GPL(ip_clear_mutable_options); -static int ah_output(struct xfrm_state *x, struct sk_buff *skb) +/******************************************************************************* + * AH Software Functions + * + ******************************************************************************* + */ +static int ah_output_done2(struct sk_buff *skb, int err) +{ + void *req_tmp = AH_SKB_CB(skb)->tmp_req; + struct iphdr *iph; + struct iphdr *top_iph; + union ah_tmp_iph *tmp_iph; + struct ip_auth_hdr *ah; + char *icv; + char *work_icv; + + if (err < 0) + goto out; + + tmp_iph = ah_tmp_work_buf(req_tmp); + icv = ah_tmp_icv(tmp_iph); + work_icv = ah_tmp_work_icv(icv); + iph = &tmp_iph->iph; + top_iph = ip_hdr(skb); + ah = ip_auth_hdr(skb); + + /* Set ICV in AH header */ + memcpy(ah->auth_data, work_icv, AH_SKB_CB(skb)->icv_trunc_len); + + /* Restore mute fields */ + top_iph->tos = iph->tos; + top_iph->ttl = iph->ttl; + top_iph->frag_off = iph->frag_off; + if (top_iph->ihl != 5) { + top_iph->daddr = iph->daddr; + memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); + } + + AH_DUMP_PKT(KERN_INFO, "AH output sw done: ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); + +out: + kfree(req_tmp); + return err; +} + +static void ah_output_done(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + + xfrm_output_resume(skb, ah_output_done2(skb, err)); +} + +static int ah_output_sw(struct xfrm_state *x, struct sk_buff *skb) { int err; struct iphdr *iph, *top_iph; struct ip_auth_hdr *ah; - struct ah_data *ahp; - union { - struct iphdr iph; - char buf[60]; - } tmp_iph; + struct ah_data *ahp; + struct ahash_request *areq; + struct scatterlist *sg; + int nfrags; + void *req_tmp = NULL; + union ah_tmp_iph *tmp_iph; + char *icv; + char *work_icv; + struct sk_buff *trailer; + + /* SKB transport, network, and mac header pointers are set by + transport or tunnel modules. 
+ + Transport Input: + ----------------------- + | IP | Rsvd | Payload | + ----------------------- + ^ + | + skb.data + + Tunnel Input: + ---------------------------------------- + | Outer IP | Rsvd | Inner IP | Payload | + ---------------------------------------- + ^ + | + skb.data + */ + + AH_DUMP_PKT(KERN_INFO, "AH output sw : ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); skb_push(skb, -skb_network_offset(skb)); + + /* Find # of fragments */ + if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + goto error; + nfrags = err; + + /* Allocate temp request */ + ahp = x->data; + req_tmp = ah_alloc_tmp(ahp->utfm.atfm, nfrags); + if (!req_tmp) { + err = -ENOMEM; + goto error; + } + + AH_SKB_CB(skb)->tmp_req = req_tmp; + AH_SKB_CB(skb)->icv_trunc_len = ahp->icv_trunc_len; + tmp_iph = ah_tmp_work_buf(req_tmp); + icv = ah_tmp_icv(tmp_iph); + work_icv = ah_tmp_work_icv(icv); + areq = ah_tmp_req(ahp->utfm.atfm, work_icv); + sg = ah_tmp_sg(ahp->utfm.atfm, areq); + top_iph = ip_hdr(skb); - iph = &tmp_iph.iph; + iph = &tmp_iph->iph; + /* Save IP header to compute hash */ iph->tos = top_iph->tos; iph->ttl = top_iph->ttl; iph->frag_off = top_iph->frag_off; - if (top_iph->ihl != 5) { + if ((top_iph->ihl << 2) > AH_WORK_BUF_MAX_LEN) { + err = -EINVAL; + goto error; + } iph->daddr = top_iph->daddr; memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); err = ip_clear_mutable_options(top_iph, &top_iph->daddr); @@ -81,85 +326,198 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) goto error; } + /* Set AH header */ ah = ip_auth_hdr(skb); ah->nexthdr = *skb_mac_header(skb); *skb_mac_header(skb) = IPPROTO_AH; + /* Mute field for hash */ top_iph->tos = 0; top_iph->tot_len = htons(skb->len); top_iph->frag_off = 0; top_iph->ttl = 0; top_iph->check = 0; - ahp = x->data; + /* Set AH fields */ ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; - ah->reserved = 0; - ah->spi = x->id.spi; - ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); + ah->spi = x->id.spi; + ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); - spin_lock_bh(&x->lock); - err = ah_mac_digest(ahp, skb, ah->auth_data); - memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); - spin_unlock_bh(&x->lock); + /* Mute AH for hash */ + memset(ah->auth_data, 0, ahp->icv_trunc_len); - if (err) + /* Setup SG for hash op */ + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, 0, skb->len); + ahash_request_set_callback(areq, 0, ah_output_done, skb); + ahash_request_set_crypt(areq, sg, work_icv, skb->len); + + err = crypto_ahash_digest(areq); + if (err == -EINPROGRESS) + goto out; + if (err < 0) goto error; - top_iph->tos = iph->tos; - top_iph->ttl = iph->ttl; - top_iph->frag_off = iph->frag_off; - if (top_iph->ihl != 5) { - top_iph->daddr = iph->daddr; - memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); + return ah_output_done2(skb, err); + +error: + if (req_tmp) + ah_free_tmp(req_tmp); +out: + return err; +} + +static int ah_input_done2(struct sk_buff *skb, int err) +{ + void *req_tmp = AH_SKB_CB(skb)->tmp_req; + struct iphdr *top_iph; + struct ip_auth_hdr *ah; + union ah_tmp_iph *tmp_iph; + int ah_hlen; + int ihl; + char *icv; + char *work_icv; + int nexthdr; + + if (err < 0) + goto out; + + tmp_iph = ah_tmp_work_buf(req_tmp); + icv = ah_tmp_icv(tmp_iph); + work_icv = ah_tmp_work_icv(icv); + + /* Verify ICV */ + if (memcmp(icv, work_icv, AH_SKB_CB(skb)->icv_trunc_len)) { + err = -EBADMSG; + goto out; } - err = 0; + top_iph = ip_hdr(skb); + ihl = top_iph->ihl << 2; + ah = (struct ip_auth_hdr 
*) ((u8 *) top_iph + ihl); + nexthdr = ah->nexthdr; + ah_hlen = (ah->hdrlen + 2) << 2; + + /* Remove AH header */ + skb->network_header += ah_hlen; + memcpy(skb_network_header(skb), tmp_iph->buf, ihl); + skb->transport_header = skb->network_header; + __skb_pull(skb, ah_hlen + ihl); + + err = nexthdr; -error: + AH_DUMP_PKT(KERN_INFO, "AH input sw done: ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); + +out: + kfree(req_tmp); return err; } -static int ah_input(struct xfrm_state *x, struct sk_buff *skb) +static void ah_input_done(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + + xfrm_input_resume(skb, ah_input_done2(skb, err)); +} + +static int ah_input_sw(struct xfrm_state *x, struct sk_buff *skb) { int ah_hlen; int ihl; int nexthdr; int err = -EINVAL; - struct iphdr *iph; - struct ip_auth_hdr *ah; - struct ah_data *ahp; - char work_buf[60]; + struct iphdr *iph; + struct ip_auth_hdr *ah; + struct ah_data *ahp; + struct sk_buff *trailer; + struct ahash_request *areq; + struct scatterlist *sg; + union ah_tmp_iph *tmp_iph; + int nfrags; + void *req_tmp = NULL; + char *icv; + char *work_icv; + + /* SKB transport, network, and mac header pointers are set by + transport or tunnel modules. + + Transport Input: + ----------------------- + | IP | AH | Payload | + ----------------------- + ^ + | + skb.data + + Tunnel Input: + ---------------------------------------- + | Outer IP | AH | Inner IP | Payload | + ---------------------------------------- + ^ + | + skb.data + */ + + AH_DUMP_PKT(KERN_INFO, "AH input sw : ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); if (!pskb_may_pull(skb, sizeof(*ah))) - goto out; + goto error; - ah = (struct ip_auth_hdr *)skb->data; + ah = (struct ip_auth_hdr *)skb->data; ahp = x->data; nexthdr = ah->nexthdr; ah_hlen = (ah->hdrlen + 2) << 2; if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) && ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len)) - goto out; + goto error; if (!pskb_may_pull(skb, ah_hlen)) - goto out; + goto error; /* We are going to _remove_ AH header to keep sockets happy, * so... Later this can change. 
*/ if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - goto out; + goto error; + + /* Find # of fragment */ + if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + goto error; + nfrags = err; skb->ip_summed = CHECKSUM_NONE; - ah = (struct ip_auth_hdr *)skb->data; + ah = (struct ip_auth_hdr *)skb->data; iph = ip_hdr(skb); + /* Allocate temp ahash request */ + req_tmp = ah_alloc_tmp(ahp->utfm.atfm, nfrags); + if (!req_tmp) { + err = -ENOMEM; + goto error; + } + AH_SKB_CB(skb)->tmp_req = req_tmp; + AH_SKB_CB(skb)->icv_trunc_len = ahp->icv_trunc_len; + tmp_iph = ah_tmp_work_buf(req_tmp); + icv = ah_tmp_icv(tmp_iph); + work_icv = ah_tmp_work_icv(icv); + areq = ah_tmp_req(ahp->utfm.atfm, work_icv); + sg = ah_tmp_sg(ahp->utfm.atfm, areq); + ihl = skb->data - skb_network_header(skb); - memcpy(work_buf, iph, ihl); + if (ihl > AH_WORK_BUF_MAX_LEN) { + err = -EBADMSG; + goto error; + } + /* Save IP header for hash computation */ + memcpy(tmp_iph->buf, iph, ihl); + + /* Mute fields for hash op */ iph->ttl = 0; iph->tos = 0; iph->frag_off = 0; @@ -167,43 +525,339 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) if (ihl > sizeof(*iph)) { __be32 dummy; if (ip_clear_mutable_options(iph, &dummy)) - goto out; + goto error; } - spin_lock(&x->lock); - { - u8 auth_data[MAX_AH_AUTH_LEN]; + /* Save ICV */ + memcpy(icv, ah->auth_data, ahp->icv_trunc_len); + /* Mute ICV for hash op */ + memset(ah->auth_data, 0, ahp->icv_trunc_len); + /* Add back IP header for SG */ + skb_push(skb, ihl); - memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); - skb_push(skb, ihl); - err = ah_mac_digest(ahp, skb, ah->auth_data); - if (err) - goto unlock; - if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) - err = -EBADMSG; + /* Setup SG */ + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, 0, skb->len); + ahash_request_set_callback(areq, 0, ah_input_done, skb); + ahash_request_set_crypt(areq, sg, work_icv, skb->len); + + err = crypto_ahash_digest(areq); + if (err == -EINPROGRESS) + goto out; + if (err < 0) + goto error; + + return ah_input_done2(skb, err); + +error: + if (req_tmp) + ah_free_tmp(req_tmp); +out: + return err; +} + +/******************************************************************************* + * AH HW Offload Functions + * + ******************************************************************************* + */ +static int ah_output_done2_hw(struct sk_buff *skb, int err) +{ + void *req_tmp = AH_SKB_CB(skb)->tmp_req; + + if (err < 0) + goto out; + + AH_DUMP_PKT(KERN_INFO, "AH output hw: ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); + +out: + kfree(req_tmp); + return err; +} + +static void ah_output_done_hw(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + + xfrm_output_resume(skb, ah_output_done2_hw(skb, err)); +} + +static int ah_output_hw(struct xfrm_state *x, struct sk_buff *skb) +{ + struct ah_data *ahp; + struct aead_request *areq; + struct scatterlist *sg; + struct scatterlist *dsg; + struct sk_buff *trailer; + void *req_tmp = NULL; + int err; + int nfrags; + unsigned int clen; + + /* For AH transport mode, skb.data is at IP header. skb.len + includes IP header and payload. skb network header, transport + header, and mac headers are updated by transport module code. + + Input: + -------------------------------------------- + | Network Hdr| Transport Hdr| IP | Payload | + -------------------------------------------- + ^ + | + skb.data + + For AH tunnel mode, outer IP header is formed by tunnel module. 
+ skb network header, transport header, and mac header are updated + by tunnel module code. + + Input: + ----------------------------------------------------- + | Outer IP | Rsvd | inner IP Header | Payload | + ----------------------------------------------------- + ^ + | + skb.data + */ + + ahp = x->data; + + /* Find # fragment */ + if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + goto error; + nfrags = err; + + /* Allocate temp request */ + req_tmp = ah_alloc_aead_tmp(ahp->utfm.aeadtfm, 2 * nfrags); + if (!req_tmp) { + err = -ENOMEM; + goto error; } -unlock: - spin_unlock(&x->lock); - if (err) + AH_SKB_CB(skb)->tmp_req = req_tmp; + areq = ah_tmp_aead_req(ahp->utfm.aeadtfm, req_tmp); + sg = ah_tmp_aead_sg(ahp->utfm.aeadtfm, areq); + dsg = ah_tmp_aead_dsg(sg, nfrags); + /* Set up SG - data will start at IP (inner) header (skb.data) */ + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, 0, skb->len); + clen = skb->len; + skb_push(skb, -skb_network_offset(skb)); + skb_to_sgvec(skb, dsg, 0, skb->len); + aead_request_set_callback(areq, 0, ah_output_done_hw, skb); + aead_request_set_crypt(areq, sg, dsg, clen, NULL); + + /* For AH transport mode, SG is at IP header. + + Input: + ---------------------- + | Rsvd| IP | Payload | + ---------------------- + Rsvd - space reserved for moved IP and added AH + + Output: + --------------------- + | IP | AH | Payload | + --------------------- + + For AH tunnel mode, outer IP header is formed by tunnel module. + SG is at inner IP header. + + Input: + ---------------------------------------- + | Outer IP | Rsvd | inner IP | Payload | + ---------------------------------------- + Rsvd - space reserved for added AH + + Output: + ---------------------------------------- + | Outer IP | AH | inner IP | Payload | + ---------------------------------------- + + */ + err = crypto_aead_encrypt(areq); + if (err == -EINPROGRESS) goto out; + if (err < 0) + goto error; - skb->network_header += ah_hlen; - memcpy(skb_network_header(skb), work_buf, ihl); - skb->transport_header = skb->network_header; - __skb_pull(skb, ah_hlen + ihl); + return ah_output_done2_hw(skb, err); + +error: + if (req_tmp) + ah_free_tmp(req_tmp); +out: + return err; +} + +static int ah_input_done2_hw(struct sk_buff *skb, int err) +{ + void *req_tmp = AH_SKB_CB(skb)->tmp_req; - return nexthdr; + if (err < 0) + goto out; + + err = AH_SKB_CB(skb)->nh; + + AH_DUMP_PKT(KERN_INFO, "AH input hw: ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); out: + kfree(req_tmp); return err; } +static void ah_input_done_hw(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + + xfrm_input_resume(skb, ah_input_done2_hw(skb, err)); +} + +static int ah_input_hw(struct xfrm_state *x, struct sk_buff *skb) +{ + int ah_hlen; + int ihl; + int err = -EINVAL; + struct ip_auth_hdr *ah; + struct ah_data *ahp; + struct sk_buff *trailer; + struct aead_request *areq; + struct scatterlist *sg; + struct scatterlist *dsg; + int nfrags; + void *req_tmp = NULL; + + /* For AH transport/tunnel mode, skb.data is at AH header. skb.len + includes payload. skb network header, transport header, and + mac headers will be updated by transport module code. 
+ + Transport Input: + ------------------------- + | IP Hdr | AH | Payload | + ------------------------- + ^ + | + skb.data and length start here + + Tunnel Input: + ------------------------------------ + |Outer IP | AH | inner IP | Payload| + ------------------------------------ + ^ + | + skb.data and length start here + */ + + AH_DUMP_PKT(KERN_INFO, "AH input hw : ", DUMP_PREFIX_ADDRESS, + 16, 4, skb->data, skb->len, 1); + + if (skb_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto error; + + /* Find # of fragment */ + if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + goto error; + nfrags = err; + + skb->ip_summed = CHECKSUM_NONE; + + ihl = skb->data - skb_network_header(skb); + ah = (struct ip_auth_hdr *) skb->data; + ah_hlen = (ah->hdrlen + 2) << 2; + AH_SKB_CB(skb)->nh = ah->nexthdr; + + /* Allocate temp request */ + ahp = x->data; + req_tmp = ah_alloc_aead_tmp(ahp->utfm.aeadtfm, 2 * nfrags); + if (!req_tmp) { + err = -ENOMEM; + goto error; + } + AH_SKB_CB(skb)->tmp_req = req_tmp; + areq = ah_tmp_aead_req(ahp->utfm.aeadtfm, req_tmp); + sg = ah_tmp_aead_sg(ahp->utfm.aeadtfm, areq); + dsg = ah_tmp_aead_dsg(sg, nfrags); + + /* Init SG - data starts at AH header */ + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, -ihl, skb->len + ihl); + skb->network_header += ah_hlen; + skb->transport_header = skb->network_header; + __skb_pull(skb, ah_hlen); + + skb_to_sgvec(skb, dsg, -ihl, skb->len + ihl); + aead_request_set_callback(areq, 0, ah_input_done_hw, skb); + aead_request_set_crypt(areq, sg, dsg, skb->len + ah_hlen + ihl, NULL); + + /* For AH transport/tunnel mode, SG is at IP header. + + Transport Input: + ---------------------------- + | IP Hdr | AH | Payload | + ---------------------------- + IP Hdr - start of SG + + Transport Output: + ---------------------------- + | | IP Hdr | Payload | + ---------------------------- + + Tunnel Input: + ------------------------------------- + | Outer IP | AH | inner IP | Payload| + ------------------------------------- + Outer IP Hdr - start of SG + + Tunnel Output: + ------------------------------------- + | Outer IP | AH | inner IP | Payload| + ------------------------------------- + Outer IP and AH left un-touch + + */ + err = crypto_aead_decrypt(areq); + if (err == -EINPROGRESS) + goto out; + + if (err < 0) + goto error; + + return ah_input_done2(skb, err); + +error: + if (req_tmp) + ah_free_tmp(req_tmp); +out: + return err; +} + +static int ah_output(struct xfrm_state *x, struct sk_buff *skb) +{ + if ((x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) && + (x->alg_flags & (XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL | + XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT))) + return ah_output_hw(x, skb); + else + return ah_output_sw(x, skb); +} + +static int ah_input(struct xfrm_state *x, struct sk_buff *skb) +{ + if ((x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) && + (x->alg_flags & (XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL | + XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT))) + return ah_input_hw(x, skb); + else + return ah_input_sw(x, skb); +} + static void ah4_err(struct sk_buff *skb, u32 info) { struct net *net = dev_net(skb->dev); - struct iphdr *iph = (struct iphdr *)skb->data; - struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); + struct iphdr *iph = (struct iphdr*)skb->data; + struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || @@ -220,9 +874,19 @@ static void ah4_err(struct sk_buff *skb, u32 info) static int ah_init_state(struct xfrm_state *x) { - struct ah_data *ahp = 
NULL; + struct ah_data *ahp = NULL; struct xfrm_algo_desc *aalg_desc; - struct crypto_hash *tfm; + struct crypto_ahash *ahashtfm; + struct crypto_aead *aeadtfm; + char alg_name[CRYPTO_MAX_ALG_NAME]; + char *key; + int key_len; + int digest_size; + struct rtattr *rta; + struct ah_param { + __be32 spi; + __be32 seq; + } *param; if (!x->aalg) goto error; @@ -234,40 +898,98 @@ static int ah_init_state(struct xfrm_state *x) if (ahp == NULL) return -ENOMEM; - tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - goto error; + /* Try AH hardware offload first */ + switch (x->props.mode) { + case XFRM_MODE_TUNNEL: + snprintf(alg_name, ARRAY_SIZE(alg_name), + "tunnel(ah(%s))", x->aalg->alg_name); + x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL + | XFRM_ALGO_FLAGS_OFFLOAD_AH; + break; + case XFRM_MODE_TRANSPORT: + snprintf(alg_name, ARRAY_SIZE(alg_name), + "transport(ah(%s))", x->aalg->alg_name); + x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT + | XFRM_ALGO_FLAGS_OFFLOAD_AH; + break; + default: + strncpy(alg_name, x->aalg->alg_name, ARRAY_SIZE(alg_name)); + break; + } + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) { + aeadtfm = crypto_alloc_aead(alg_name, 0, 0); + if (IS_ERR(aeadtfm)) { + /* No AH hardware offload, go to software AH */ + x->alg_flags &= ~(XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL + | XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT + | XFRM_ALGO_FLAGS_OFFLOAD_AH); + aeadtfm = NULL; + ahashtfm = crypto_alloc_ahash(x->aalg->alg_name, 0, 0); + if (IS_ERR(ahashtfm)) + goto error; + ahp->utfm.atfm = ahashtfm; + } else { + ahashtfm = NULL; + ahp->utfm.aeadtfm = aeadtfm; + } + } else { + aeadtfm = NULL; + ahashtfm = crypto_alloc_ahash(alg_name, 0, 0); + if (IS_ERR(ahashtfm)) + goto error; + ahp->utfm.atfm = ahashtfm; + } - ahp->tfm = tfm; - if (crypto_hash_setkey(tfm, x->aalg->alg_key, - (x->aalg->alg_key_len + 7) / 8)) - goto error; + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) { + /* For AH offload, we must load AH offload parameters + via setkey function. */ + key_len = RTA_SPACE(sizeof(*param)) + + ((x->aalg->alg_key_len + 7) / 8); + key = kmalloc(key_len, GFP_KERNEL); + rta = (void *) key; + rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; + rta->rta_len = RTA_LENGTH(sizeof(*param)); + param = RTA_DATA(rta); + param->spi = cpu_to_be32(x->id.spi); + param->seq = cpu_to_be32(x->replay.oseq); + memcpy(key + RTA_SPACE(sizeof(*param)), + x->aalg->alg_key, + (x->aalg->alg_key_len + 7) / 8); + if (crypto_aead_setkey(aeadtfm, key, key_len)) + goto error; + digest_size = crypto_aead_tfm(aeadtfm)->__crt_alg-> + cra_aead.maxauthsize; + } else { + key_len = (x->aalg->alg_key_len + 7) / 8; + key = x->aalg->alg_key; + if (crypto_ahash_setkey(ahashtfm, key, key_len)) + goto error; + digest_size = crypto_ahash_digestsize(ahashtfm); + } /* * Lookup the algorithm description maintained by xfrm_algo, * verify crypto transform properties, and store information * we need for AH processing. This lookup cannot fail here - * after a successful crypto_alloc_hash(). + * after a successful crypto_alloc_ahash(). 
*/ aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); - if (aalg_desc->uinfo.auth.icv_fullbits/8 != - crypto_hash_digestsize(tfm)) { + if (aalg_desc->uinfo.auth.icv_fullbits/8 != digest_size) { printk(KERN_INFO "AH: %s digestsize %u != %hu\n", - x->aalg->alg_name, crypto_hash_digestsize(tfm), + x->aalg->alg_name, digest_size, aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; - BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); - ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); - if (!ahp->work_icv) - goto error; + /* For AH hardware offload, set ICV size */ + if (aeadtfm) + crypto_aead_setauthsize(aeadtfm, ahp->icv_trunc_len); x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len); @@ -279,8 +1001,7 @@ static int ah_init_state(struct xfrm_state *x) error: if (ahp) { - kfree(ahp->work_icv); - crypto_free_hash(ahp->tfm); + crypto_free_ahash(ahp->utfm.atfm); kfree(ahp); } return -EINVAL; @@ -293,12 +1014,11 @@ static void ah_destroy(struct xfrm_state *x) if (!ahp) return; - kfree(ahp->work_icv); - crypto_free_hash(ahp->tfm); + crypto_free_ahash(ahp->utfm.atfm); + ahp->utfm.atfm = NULL; kfree(ahp); } - static const struct xfrm_type ah_type = { .description = "AH4", diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 12f7287e902..ffb24aeeaef 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -15,45 +15,127 @@ #include <net/icmp.h> #include <net/protocol.h> #include <net/udp.h> +#include <linux/highmem.h> + +//#define DEBUG_ESP +#ifdef DEBUG_ESP +# define ESP_DUMP_PKT print_hex_dump +# define ESP_PRINTK printk +#else +# define ESP_DUMP_PKT(arg...) +# define ESP_PRINTK(arg...) +#endif struct esp_skb_cb { struct xfrm_skb_cb xfrm; - void *tmp; + void *req_ctx; + struct esp_data *esp; + char flags; }; #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) +struct esp_offload_param { + unsigned short encap_sport; + unsigned short encap_dport; + unsigned char nh; + unsigned char pad_len; +} __attribute__((packed)); + +#define ESP_OFFLOAD_INFO_SIZE ALIGN(sizeof(struct esp_offload_param), 4) + /* - * Allocate an AEAD request structure with extra space for SG and IV. + * Allocate an AEAD request structure with extra space for SG, IV, and 16 + * bytes info data for HW offload. * * For alignment considerations the IV is placed at the front, followed * by the request and finally the SG list. * * TODO: Use spare space in skb for this where possible. 
*/ -static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags) + +static int esp_req_ctx_size(struct crypto_aead *aead, int nfrags) { unsigned int len; - len = crypto_aead_ivsize(aead); - if (len) { - len += crypto_aead_alignmask(aead) & - ~(crypto_tfm_ctx_alignment() - 1); - len = ALIGN(len, crypto_tfm_ctx_alignment()); - } - + len = ESP_OFFLOAD_INFO_SIZE; + len += crypto_aead_ivsize(aead); + len += crypto_aead_alignmask(aead) & + ~(crypto_tfm_ctx_alignment() - 1); + len = ALIGN(len, crypto_tfm_ctx_alignment()); + len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); len = ALIGN(len, __alignof__(struct scatterlist)); len += sizeof(struct scatterlist) * nfrags; - return kmalloc(len, GFP_ATOMIC); + return len; +} + +static void *esp_alloc_req_ctx( struct esp_skb_cb *esp_skb, + struct crypto_aead *aead, + int nfrags) +{ + void *ctx; + unsigned int len; + struct esp_data *esp = esp_skb->esp; + +#if CONFIG_INET_ESP_NR_REQ_CACHE > 0 + if (nfrags <= ESP_NFRAGS_CACHE) { + esp_skb->flags |= 0x01; + if (atomic_read(&esp->req_cache_cnt)) { + ctx = esp->req_cache[esp->req_cache_head]; + esp->req_cache_head = (esp->req_cache_head + 1) % + ESP_REQ_CACHE_MAX; + atomic_dec(&esp->req_cache_cnt); + return ctx; + } + len = esp->req_cache_size + + sizeof(struct scatterlist) * ESP_NFRAGS_CACHE; + ctx = kmalloc(len, GFP_ATOMIC); + } else { + esp_skb->flags &= ~0x01; + len = esp->req_cache_size + + sizeof(struct scatterlist) * nfrags; + ctx = kmalloc(len, GFP_ATOMIC); + } +#else + len = esp->req_cache_size + + sizeof(struct scatterlist) * nfrags; + ctx = kmalloc(len, GFP_ATOMIC); +#endif + return ctx; +} + +static void esp_free_req_ctx(struct esp_skb_cb *esp_skb) +{ +#if CONFIG_INET_ESP_NR_REQ_CACHE > 0 + struct esp_data *esp = esp_skb->esp; + + if (esp_skb->flags & 0x01) { + if (atomic_read(&esp->req_cache_cnt) < ESP_REQ_CACHE_MAX) { + esp->req_cache[esp->req_cache_tail] = esp_skb->req_ctx; + esp->req_cache_tail = (esp->req_cache_tail + 1) % + ESP_REQ_CACHE_MAX; + atomic_inc(&esp->req_cache_cnt); + return; + } + } +#endif + kfree(esp_skb->req_ctx); +} + +static inline void *esp_tmp_offload_info(void *tmp) +{ + return tmp; } static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp) { return crypto_aead_ivsize(aead) ? 
- PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp; + PTR_ALIGN((u8 *) tmp + ESP_OFFLOAD_INFO_SIZE, + crypto_aead_alignmask(aead) + 1) : + ((u8 *) tmp + ESP_OFFLOAD_INFO_SIZE); } static inline struct aead_givcrypt_request *esp_tmp_givreq( @@ -95,15 +177,14 @@ static inline struct scatterlist *esp_givreq_sg( static void esp_output_done(struct crypto_async_request *base, int err) { - struct sk_buff *skb = base->data; - - kfree(ESP_SKB_CB(skb)->tmp); + struct sk_buff *skb = base->data; + + esp_free_req_ctx(ESP_SKB_CB(skb)); xfrm_output_resume(skb, err); } -static int esp_output(struct xfrm_state *x, struct sk_buff *skb) +static int esp_output_sw(struct xfrm_state *x, struct sk_buff *skb) { - int err; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_givcrypt_request *req; @@ -112,8 +193,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) struct esp_data *esp; struct sk_buff *trailer; void *tmp; - u8 *iv; - u8 *tail; + int err; + u8 *iv; + u8 *tail; int blksize; int clen; int alen; @@ -139,7 +221,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) goto error; nfrags = err; - tmp = esp_alloc_tmp(aead, nfrags + 1); + ESP_SKB_CB(skb)->esp = esp; + + tmp = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags + 1); if (!tmp) goto error; @@ -157,6 +241,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) } while (0); tail[clen - skb->len - 2] = (clen - skb->len) - 2; tail[clen - skb->len - 1] = *skb_mac_header(skb); + pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); @@ -197,7 +282,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) *skb_mac_header(skb) = IPPROTO_UDP; } - esph->spi = x->id.spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); @@ -210,28 +294,265 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) aead_givcrypt_set_callback(req, 0, esp_output_done, skb); aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, sizeof(*esph)); - aead_givcrypt_set_giv(req, esph->enc_data, + aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq.output); - ESP_SKB_CB(skb)->tmp = tmp; + ESP_SKB_CB(skb)->req_ctx = tmp; + err = crypto_aead_givencrypt(req); if (err == -EINPROGRESS) goto error; if (err == -EBUSY) err = NET_XMIT_DROP; + + + esp_free_req_ctx(ESP_SKB_CB(skb)); + +error: + return err; +} + +static int esp_output_done2_hw(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + void *esp_req = ESP_SKB_CB(skb)->req_ctx; + struct esp_offload_param *param; + + if (err >= 0) { + err = 0; + } + else { + if ( err < 0) + goto out; + } + param = esp_tmp_offload_info(esp_req); + if (param->encap_sport) { + /* UDP Encapsulation - Fill in dynamic fields */ + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl << 2; + struct udphdr *uh = (struct udphdr *) (((u8 *) iph) + ihl); + uh->source = param->encap_sport; + uh->dest = param->encap_dport; + *skb_mac_header(skb) = IPPROTO_UDP; + } else { + *skb_mac_header(skb) = IPPROTO_ESP; + } +out: + esp_free_req_ctx(ESP_SKB_CB(skb)); + return err; +} + +static void esp_output_done_hw(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + + xfrm_output_resume(skb, esp_output_done2_hw(base, err)); +} + +static int esp_output_hw(struct xfrm_state *x, struct sk_buff *skb) +{ + struct crypto_aead *aead; + struct aead_request *req; + struct scatterlist *sg; + struct scatterlist *dsg; + struct esp_data *esp; + struct sk_buff *trailer; 
+ struct esp_offload_param *param; + void *esp_req; + int err; + u8 *iv; + int clen; + int alen; + int pad_len; + int nfrags; + + /* For ESP transport mode, skb.data is at IP header. skb.len + includes IP header and payload. skb network header, transport + header, and mac headers pointer are updated by transport module + code. + + Input: + -------------------------------------------- + | Network Hdr| Transport Hdr| IP | Payload | + -------------------------------------------- + ^ + | + skb.data + + For ESP tunnel mode, outer IP header is formed by tunnel module. + skb network header, transport header, and mac header pointer are + updated by tunnel module code. + + Input: + ----------------------------------------------------- + | Outer IP | reserved1 | inner IP Header | Payload | + ----------------------------------------------------- + ^ + | + skb.data + + Outer IP - Formed by tunnel mode code + reserved1* - space reserved for UDP, ESP, SEQ, and IV + */ + + err = -ENOMEM; + + esp = x->data; + aead = esp->aead; + alen = crypto_aead_authsize(aead); + + /* Compute pad length to expand skb tail for padding by HW */ + if (x->props.mode == XFRM_MODE_TUNNEL) { + clen = skb->len; + } else { + struct iphdr *iph = (struct iphdr *) skb->data; + clen = skb->len - (iph->ihl << 2); + } + + pad_len = ALIGN(clen + 2, ALIGN(crypto_aead_blocksize(aead), 4)); + if (esp->padlen) + pad_len = ALIGN(pad_len, esp->padlen); + pad_len -= clen; + + /* Expand tailer to include padding and ICV */ + if ((err = skb_cow_data(skb, pad_len + alen, &trailer)) < 0) + goto error; + nfrags = err; + + ESP_SKB_CB(skb)->esp = esp; + + esp_req = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags * 2); + if (!esp_req) + goto error; + + param = esp_tmp_offload_info(esp_req); + iv = esp_tmp_iv(aead, param); /* Not used */ + req = esp_tmp_req(aead, iv); + sg = esp_req_sg(aead, req); + dsg = sg + nfrags; + + /* Setup SG */ + sg_init_table(sg, nfrags); + skb_to_sgvec(skb, sg, 0, skb->len); + + /* Prepare SKB to include padded length and ESP header */ + clen = skb->len; + pskb_put(skb, trailer, pad_len + alen); + skb_push(skb, x->props.header_len); + sg_init_table(dsg, nfrags); + if (x->props.mode == XFRM_MODE_TUNNEL) + skb_to_sgvec(skb, dsg, 20, skb->len - 20); + else + skb_to_sgvec(skb, dsg, 0, skb->len); + /* This is non-NULL only with UDP Encapsulation. + UDP Encapsulation is done by hardware. Port of UDP encapsulation + info are passed to hardware offload driver at ESP init time. Dynamic + info per a packet are saved and filled in after hardware offload + */ + if (x->encap) { + struct xfrm_encap_tmpl *encap = x->encap; + + spin_lock_bh(&x->lock); + param->encap_sport = encap->encap_sport; + param->encap_dport = encap->encap_dport; + spin_unlock_bh(&x->lock); + } else { + param->encap_sport = 0; + } + + /* Format input parameters for hardware ESP offload algorithm. + For ESP transport mode, IP header and payload are passed as + the source sg. All associate data such as SPI, seq and etc, + are loaded at init time. 
+ + Input Src: + ---------------------------------------- + | reserved1 | IP | Payload | reserved2 | + ---------------------------------------- + ^ + | + start if source sg + + ---------------------------------------- + | reserved1 | IP | Payload | reserved2 | + ---------------------------------------- + ^ + | + start if destination sg + + reserved1 - space reserved for header (UDP ENCAP, ESP, & IV) + reserved2 - space reserved for padding + ICV + + Output: + ------------------------------------------------------------- + | IP | UDP ENCAP | ESP | IV | Payload | IPSec Padding | ICV | + ------------------------------------------------------------- + UDP ENCAP if NAT + + For ESP tunnel mode, IP header and payload is passed as + the source sg. All associate data such as SPI, seq and etc, + are loaded at init time. The outer IP is formed by tunnel module. + + Input: + ---------------------------------------------------- + | Outer IP | reserved1 | inner IP Header | Payload | + ---------------------------------------------------- + ^ + | + start of source sg + + ---------------------------------------------------- + | Outer IP | reserved1 | inner IP Header | Payload | + ---------------------------------------------------- + ^ (inner IP header not moved) + | + start if destination sg + + Outer IP - formed by tunnel mode code + reserved1 - space reserved for UDP, ESP, SEQ, and IV + inner IP Header - Start of sg. length is inner IP Header + payload + + Output: + ------------------------------------------------------------------ + |Outer IP|UDP ENCAP|SPI+SEQ|IV|Inner IP|Payload|IPSec Padding|ICV| + ------------------------------------------------------------------ + Outer IP - Formed by tunnel mode code + UDP ENCAP if NAT + + */ + aead_request_set_callback(req, 0, esp_output_done_hw, skb); + aead_request_set_crypt(req, sg, dsg, clen, iv); + + ESP_SKB_CB(skb)->req_ctx = esp_req; + + err = crypto_aead_encrypt(req); + if (err == -EINPROGRESS) + goto error; + + if (err == -EBUSY) + err = NET_XMIT_DROP; - kfree(tmp); + err = esp_output_done2_hw(&req->base, err); error: return err; } +static int esp_output(struct xfrm_state *x, struct sk_buff *skb) +{ + + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) + return esp_output_hw(x, skb); + else + return esp_output_sw(x, skb); +} + static int esp_input_done2(struct sk_buff *skb, int err) { struct iphdr *iph; - struct xfrm_state *x = xfrm_input_state(skb); - struct esp_data *esp = x->data; + struct xfrm_state *x = xfrm_input_state(skb); + struct esp_data *esp = ESP_SKB_CB(skb)->esp; struct crypto_aead *aead = esp->aead; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); @@ -239,8 +560,8 @@ static int esp_input_done2(struct sk_buff *skb, int err) int ihl; u8 nexthdr[2]; int padlen; - - kfree(ESP_SKB_CB(skb)->tmp); + + esp_free_req_ctx(ESP_SKB_CB(skb)); if (unlikely(err)) goto out; @@ -250,6 +571,7 @@ static int esp_input_done2(struct sk_buff *skb, int err) err = -EINVAL; padlen = nexthdr[0]; + if (padlen + 2 + alen >= elen) goto out; @@ -300,7 +622,6 @@ static int esp_input_done2(struct sk_buff *skb, int err) skb_set_transport_header(skb, -ihl); err = nexthdr[1]; - /* RFC4303: Drop dummy packets without any error */ if (err == IPPROTO_NONE) err = -EINVAL; @@ -321,7 +642,7 @@ static void esp_input_done(struct crypto_async_request *base, int err) * expensive, so we only support truncated data, which is the recommended * and common case. 
*/ -static int esp_input(struct xfrm_state *x, struct sk_buff *skb) +static int esp_input_sw(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; @@ -347,15 +668,18 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) nfrags = err; err = -ENOMEM; - tmp = esp_alloc_tmp(aead, nfrags + 1); + + ESP_SKB_CB(skb)->esp = esp; + + tmp = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags + 1); if (!tmp) goto out; - ESP_SKB_CB(skb)->tmp = tmp; - iv = esp_tmp_iv(aead, tmp); + ESP_SKB_CB(skb)->req_ctx = tmp; + iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); - sg = asg + 1; + sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; @@ -371,7 +695,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); - + err = crypto_aead_decrypt(req); if (err == -EINPROGRESS) goto out; @@ -382,6 +706,262 @@ out: return err; } +static int esp_input_done2_hw(struct sk_buff *skb, int err) +{ + struct iphdr *iph; + struct xfrm_state *x = xfrm_input_state(skb); + struct esp_data *esp = ESP_SKB_CB(skb)->esp; + struct crypto_aead *aead = esp->aead; + int alen = crypto_aead_authsize(aead); + int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); + int elen = skb->len - hlen; + int ihl; + u8 nexthdr[2]; + int padlen; + void *esp_req = ESP_SKB_CB(skb)->req_ctx; + + if (err >= 0) { + if ( err == 0) { + if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) + BUG(); + } + else { + nexthdr[0] = (unsigned char)(err >> 8); + nexthdr[1] = (unsigned char)(err >> 0); + } + } + else { + goto out; + } + + err = -EINVAL; + padlen = nexthdr[0] + 2; + + if (padlen + alen > elen) + goto out; + + iph = ip_hdr(skb); + ihl = iph->ihl * 4; + + if (x->encap) { + struct xfrm_encap_tmpl *encap = x->encap; + struct esp_offload_param *param = esp_tmp_offload_info(esp_req); + /* + * 1) if the NAT-T peer's IP or port changed then + * advertize the change to the keying daemon. + * This is an inbound SA, so just compare + * SRC ports. + */ + if (iph->saddr != x->props.saddr.a4 || + param->encap_sport != encap->encap_sport) { + xfrm_address_t ipaddr; + + ipaddr.a4 = iph->saddr; + km_new_mapping(x, &ipaddr, param->encap_sport); + + /* XXX: perhaps add an extra + * policy check here, to see + * if we should allow or + * reject a packet from a + * different source + * address/port. 
+ */ + } + + /* + * 2) ignore UDP/TCP checksums in case + * of NAT-T in Transport Mode, or + * perform other post-processing fixes + * as per draft-ietf-ipsec-udp-encaps-06, + * section 3.1.2 + */ + if (x->props.mode == XFRM_MODE_TRANSPORT) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + pskb_trim(skb, skb->len - alen - padlen); + __skb_pull(skb, hlen); + skb_set_transport_header(skb, -ihl); + + err = nexthdr[1]; + + /* RFC4303: Drop dummy packets without any error */ + if (err == IPPROTO_NONE) + err = -EINVAL; +out: + esp_free_req_ctx(ESP_SKB_CB(skb)); + return err; +} + +static void esp_input_done_hw(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + + xfrm_input_resume(skb, esp_input_done2_hw(skb, err)); +} + +static int esp_input_hw(struct xfrm_state *x, struct sk_buff *skb) +{ + struct ip_esp_hdr *esph; + struct esp_data *esp = x->data; + struct crypto_aead *aead = esp->aead; + struct aead_request *req; + struct sk_buff *trailer; + int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); + int nfrags; + void *esp_req; + u8 *iv; + struct scatterlist *sg; + struct scatterlist *dsg; + struct esp_offload_param *param; + int err = -EINVAL; + int hdr_len; + int clen; + int src_len; + + + /* For ESP transport mode, skb.data is at ESP header. skb.len + includes IP header and payload. skb network header, transport + header, and mac header is updated by transport module code. + + Input: + ------------------------------------------------------------- + | IP Hdr | UDP ENCAP | ESP | IV | Payload | IPSec Pad | ICV | + ------------------------------------------------------------- + ^ + | + skb.data and length start here + For ESP tunnel mode, + + Input: + ---------------------------------------------------------- + |Outer IP|UDP ENCAP|ESP|IV|inner IP|Payload|IPSec Pad|ICV| + ---------------------------------------------------------- + ^ + | + skb.data and length start here + */ + if (!pskb_may_pull(skb, sizeof(*esph))) + goto out; + + if (elen <= 0) + goto out; + + if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + goto out; + nfrags = err; + + err = -ENOMEM; + + ESP_SKB_CB(skb)->esp = esp; + + esp_req = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags * 2); + if (!esp_req) + goto out; + + ESP_SKB_CB(skb)->req_ctx = esp_req; + param = esp_tmp_offload_info(esp_req); + iv = esp_tmp_iv(aead, param); + req = esp_tmp_req(aead, iv); + sg = esp_req_sg(aead, req); + dsg = sg + nfrags; + + skb->ip_summed = CHECKSUM_NONE; + + /* This is non-NULL only with UDP Encapsulation. + UDP Encapsulation is done after ESP processing. Save info as + required as hardware offload driver can override these info. 
*/ + if (x->encap) { + struct iphdr *iph = ip_hdr(skb); + struct udphdr *uh; + uh = (struct udphdr *) ((u8 *) iph + iph->ihl * 4); + param->encap_sport = uh->source; + } + + /* Setup SG */ + sg_init_table(sg, nfrags); + hdr_len = skb_network_offset(skb); + if (x->props.mode == XFRM_MODE_TUNNEL) { + clen = skb->len; + skb_to_sgvec(skb, sg, 0, clen); + } else { + clen = -hdr_len + skb->len; + skb_to_sgvec(skb, sg, hdr_len, clen); + } + src_len = clen; + + sg_init_table(dsg, nfrags); + if (x->props.mode == XFRM_MODE_TUNNEL) { + clen -= hdr_len + x->props.header_len; + skb_to_sgvec(skb, dsg, hdr_len + x->props.header_len, clen); + } else { + clen -= crypto_aead_authsize(aead); + clen -= (x->props.header_len); + /* clen -= crypto_aead_ivsize(aead); + clen += hdr_len; */ + skb_to_sgvec(skb, dsg, hdr_len + x->props.header_len, clen); + } + + /* For ESP transport mode: + + Input: + -------------------------------------------------------------------- + |IP|UDP ENCAP|ESP|IV| Payload | IPSec Pad | ICV | + -------------------------------------------------------------------- + ^ + | + start of source SG + + ------------------------------------------------------------- + | reserved | IP | Payload | Pad | Pad length | Next hdr | + ------------------------------------------------------------- + ^ (Payload position not moved) + | + start of destination SG + + Output: + ------------------------------------------------------------- + | reserved | IP | Payload | Pad | Pad length | Next hdr | + ------------------------------------------------------------- + IP header is moved right before payload field. Payload not moved. + + For ESP tunnel mode: + + Input: + -------------------------------------------------------------- + |Out IP|UDP ENCAP|ESP | IV | In IP | Payload | IPSec Pad |ICV| + -------------------------------------------------------------- + ESP - start of SG and length + + Output: + --------------------------------------------------------------------- + | reserved | In IP | Payload |Pad |Pad len|Next Hder| + --------------------------------------------------------------------- + reserved is removed header but inner IP and payload are left at + same position + + */ + aead_request_set_callback(req, 0, esp_input_done_hw, skb); + aead_request_set_crypt(req, sg, dsg, src_len, iv); + + err = crypto_aead_decrypt(req); + if (err == -EINPROGRESS) + goto out; + + err = esp_input_done2_hw(skb, err); + +out: + return err; +} + +static int esp_input(struct xfrm_state *x, struct sk_buff *skb) +{ + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) + return esp_input_hw(x, skb); + else + return esp_input_sw(x, skb); +} + static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; @@ -414,16 +994,16 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) static void esp4_err(struct sk_buff *skb, u32 info) { struct net *net = dev_net(skb->dev); - struct iphdr *iph = (struct iphdr *)skb->data; - struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); + struct iphdr *iph = (struct iphdr*)skb->data; + struct ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return; - x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); - if (!x) + x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, +AF_INET); if (!x) return; NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 
ntohl(esph->spi), ntohl(iph->daddr)); @@ -438,6 +1018,16 @@ static void esp_destroy(struct xfrm_state *x) return; crypto_free_aead(esp->aead); + +#if CONFIG_INET_ESP_NR_REQ_CACHE > 0 + /* Delete request cache */ + while (atomic_dec_return(&esp->req_cache_cnt)) { + kfree(esp->req_cache[esp->req_cache_head]); + esp->req_cache_head = (esp->req_cache_head + 1) % + ESP_REQ_CACHE_MAX; + } +#endif + kfree(esp); } @@ -446,16 +1036,91 @@ static int esp_init_aead(struct xfrm_state *x) struct esp_data *esp = x->data; struct crypto_aead *aead; int err; + char alg_name[CRYPTO_MAX_ALG_NAME]; + char *key; + int key_len; + struct rtattr *rta; + struct esp_param { + __be32 spi; + __be32 seq; + __be16 pad_block_size; + __be16 encap_uhl; + } *param; - aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); + switch (x->props.mode) { + case XFRM_MODE_TUNNEL: + snprintf(alg_name, ARRAY_SIZE(alg_name), + "tunnel(esp(%s))", x->aead->alg_name); + x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL + | XFRM_ALGO_FLAGS_OFFLOAD_ESP; + break; + case XFRM_MODE_TRANSPORT: + snprintf(alg_name, ARRAY_SIZE(alg_name), + "transport(esp(%s))", x->aead->alg_name); + x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT + | XFRM_ALGO_FLAGS_OFFLOAD_ESP; + break; + default: + strncpy(alg_name, x->aead->alg_name, ARRAY_SIZE(alg_name)); + break; + } + aead = crypto_alloc_aead(alg_name, 0, 0); + if (IS_ERR(aead) && (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)) { + x->alg_flags &= ~(XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL + | XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT + | XFRM_ALGO_FLAGS_OFFLOAD_ESP); + aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); + } err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; - err = crypto_aead_setkey(aead, x->aead->alg_key, - (x->aead->alg_key_len + 7) / 8); + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) { + /* For esp offload, we must load esp offload parameters + via setkey function. 
*/ + key_len = RTA_SPACE(sizeof(*param)) + + ((x->aead->alg_key_len + 7) / 8); + key = kmalloc(key_len, 0); + rta = (void *) key; + rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; + rta->rta_len = RTA_LENGTH(sizeof(*param)); + param = RTA_DATA(rta); + param->spi = cpu_to_be32(x->id.spi); + param->seq = cpu_to_be32(x->replay.oseq); + if (x->encap) { + int encap_type; + + spin_lock_bh(&x->lock); + encap_type = x->encap->encap_type; + spin_unlock_bh(&x->lock); + + switch (encap_type) { + default: + case UDP_ENCAP_ESPINUDP: + param->encap_uhl = cpu_to_be16(sizeof(struct udphdr)); + break; + case UDP_ENCAP_ESPINUDP_NON_IKE: + param->encap_uhl = cpu_to_be16(sizeof(struct udphdr) + + sizeof(__be32)*2); + break; + } + } else { + param->encap_uhl = 0; + } + param->pad_block_size = cpu_to_be16(esp->padlen); + memcpy(key + RTA_SPACE(sizeof(*param)), + x->aead->alg_key, + (x->aead->alg_key_len + 7) / 8); + } else { + key_len = (x->aead->alg_key_len + 7) / 8; + key = x->aead->alg_key; + } + + err = crypto_aead_setkey(aead, key, key_len); + if (key != x->aead->alg_key) + kfree(key); if (err) goto error; @@ -471,25 +1136,68 @@ static int esp_init_authenc(struct xfrm_state *x) { struct esp_data *esp = x->data; struct crypto_aead *aead; - struct crypto_authenc_key_param *param; struct rtattr *rta; char *key; char *p; char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; + struct esp_authenc_param { + __be32 spi; + __be32 seq; + __be16 pad_block_size; + __be16 encap_uhl; + struct crypto_authenc_key_param authenc_param; + /* Must be last */ + } *esp_param = NULL; + struct crypto_authenc_key_param *param = NULL; err = -EINVAL; if (x->ealg == NULL) goto error; err = -ENAMETOOLONG; - if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", - x->aalg ? x->aalg->alg_name : "digest_null", - x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) - goto error; + + switch (x->props.mode) { + case XFRM_MODE_TUNNEL: + if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, + "tunnel(esp(authenc(%s,%s)))", + x->aalg ? x->aalg->alg_name : "digest_null", + x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) + goto error; + x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL + | XFRM_ALGO_FLAGS_OFFLOAD_ESP; + break; + case XFRM_MODE_TRANSPORT: + if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, + "transport(esp(authenc(%s,%s)))", + x->aalg ? x->aalg->alg_name : "digest_null", + x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) + goto error; + x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT + | XFRM_ALGO_FLAGS_OFFLOAD_ESP; + break; + default: + if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, + "authenc(%s,%s)", + x->aalg ? x->aalg->alg_name : "digest_null", + x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) + goto error; + break; + } aead = crypto_alloc_aead(authenc_name, 0, 0); + if (IS_ERR(aead) && (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)) { + x->alg_flags &= ~(XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL + | XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT + | XFRM_ALGO_FLAGS_OFFLOAD_ESP); + if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, + "authenc(%s,%s)", + x->aalg ? x->aalg->alg_name : "digest_null", + x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) + goto error; + aead = crypto_alloc_aead(authenc_name, 0, 0); + } err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; @@ -497,7 +1205,11 @@ static int esp_init_authenc(struct xfrm_state *x) esp->aead = aead; keylen = (x->aalg ? 
(x->aalg->alg_key_len + 7) / 8 : 0) + - (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); + (x->ealg->alg_key_len + 7) / 8; + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) + keylen += RTA_SPACE(sizeof(*esp_param)); + else + keylen += RTA_SPACE(sizeof(*param)); err = -ENOMEM; key = kmalloc(keylen, GFP_KERNEL); if (!key) @@ -506,9 +1218,15 @@ static int esp_init_authenc(struct xfrm_state *x) p = key; rta = (void *)p; rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; - rta->rta_len = RTA_LENGTH(sizeof(*param)); - param = RTA_DATA(rta); - p += RTA_SPACE(sizeof(*param)); + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) { + rta->rta_len = RTA_LENGTH(sizeof(*esp_param)); + esp_param = RTA_DATA(rta); + p += RTA_SPACE(sizeof(*esp_param)); + } else { + rta->rta_len = RTA_LENGTH(sizeof(*param)); + param = RTA_DATA(rta); + p += RTA_SPACE(sizeof(*param)); + } if (x->aalg) { struct xfrm_algo_desc *aalg_desc; @@ -535,7 +1253,39 @@ static int esp_init_authenc(struct xfrm_state *x) goto free_key; } - param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) { + esp_param->authenc_param.enckeylen = + cpu_to_be32((x->ealg->alg_key_len + 7) / 8); + /* For esp offload, we must load esp offload parameters + via setkey function. */ + esp_param->spi = cpu_to_be32(x->id.spi); + esp_param->seq = cpu_to_be32(x->replay.oseq); + if (x->encap) { + int encap_type; + + spin_lock_bh(&x->lock); + encap_type = x->encap->encap_type; + spin_unlock_bh(&x->lock); + + switch (encap_type) { + default: + case UDP_ENCAP_ESPINUDP: + esp_param->encap_uhl = + cpu_to_be16(sizeof(struct udphdr)); + break; + case UDP_ENCAP_ESPINUDP_NON_IKE: + esp_param->encap_uhl = + cpu_to_be16(sizeof(struct udphdr) + + sizeof(__be32)*2); + break; + } + } else { + esp_param->encap_uhl = 0; + } + esp_param->pad_block_size = cpu_to_be16(esp->padlen); + } else { + param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); + } memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); err = crypto_aead_setkey(aead, key, keylen); @@ -569,7 +1319,12 @@ static int esp_init_state(struct xfrm_state *x) goto error; aead = esp->aead; - +#if CONFIG_INET_ESP_NR_REQ_CACHE > 0 + atomic_set(&esp->req_cache_cnt, 0); + esp->req_cache_head = 0; + esp->req_cache_tail = 0; +#endif + esp->req_cache_size = esp_req_ctx_size(aead, 0); esp->padlen = 0; x->props.header_len = sizeof(struct ip_esp_hdr) + diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f1813bc7108..8b8b2766253 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -610,7 +610,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, /* * We can't seek on a socket input */ - if (unlikely(*ppos)) + if (unlikely(ppos)) return -ESPIPE; ret = spliced = 0; @@ -623,8 +623,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, if (ret < 0) break; else if (!ret) { - if (spliced) + if (spliced >= len) break; + if (flags & SPLICE_F_NONBLOCK) { + ret = -EAGAIN; + break; + } if (sock_flag(sk, SOCK_DONE)) break; if (sk->sk_err) { @@ -1334,6 +1338,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_eat_skb(sk, skb, 0); if (!desc->count) break; + tp->copied_seq = seq; } tp->copied_seq = seq; diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index fd840c7d75e..bf1498c138e 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -23,6 +23,15 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; + 
if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT) { + /* Hardware offload will take care of moving the IP header */ + skb_set_network_header(skb, -x->props.header_len); + skb->mac_header = skb->network_header + + offsetof(struct iphdr, protocol); + skb->transport_header = skb->network_header + ihl; + return 0; + } + skb_set_network_header(skb, -x->props.header_len); skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol); @@ -42,8 +51,16 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) */ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { - int ihl = skb->data - skb_transport_header(skb); + int ihl; + + if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT) { + /* Hardware offload will take care of moving the IP header */ + skb->network_header = skb->transport_header; + skb_reset_transport_header(skb); + return 0; + } + ihl = skb->data - skb_transport_header(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl);
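
The request-context cache behind the new CONFIG_INET_ESP_NR_REQ_CACHE option is a small per-state ring: esp_free_req_ctx() parks a finished context (up to ESP_REQ_CACHE_MAX entries, and only contexts sized for at most ESP_NFRAGS_CACHE fragments) instead of freeing it, and esp_alloc_req_ctx() reuses a parked one before falling back to kmalloc(). Below is a minimal single-threaded userspace sketch of that ring, with malloc()/free() standing in for the kernel allocator and a plain int for the atomic counter; struct req_cache and the function names are illustrative, not taken from the patch.

#include <stdlib.h>

#define REQ_CACHE_MAX 4		/* stand-in for ESP_REQ_CACHE_MAX */

struct req_cache {
	void *slot[REQ_CACHE_MAX];
	int head;		/* next cached entry to hand out */
	int tail;		/* next free slot to fill */
	int count;		/* cached entries (atomic_t in the patch) */
};

/* Reuse a cached context when one is available, else fall back to malloc(). */
static void *req_ctx_get(struct req_cache *c, size_t len)
{
	void *ctx;

	if (c->count) {
		ctx = c->slot[c->head];
		c->head = (c->head + 1) % REQ_CACHE_MAX;
		c->count--;
		return ctx;
	}
	return malloc(len);
}

/* Park the context on the ring if there is room, otherwise free it. */
static void req_ctx_put(struct req_cache *c, void *ctx)
{
	if (c->count < REQ_CACHE_MAX) {
		c->slot[c->tail] = ctx;
		c->tail = (c->tail + 1) % REQ_CACHE_MAX;
		c->count++;
		return;
	}
	free(ctx);
}

/* Teardown: drain whatever is still cached. */
static void req_cache_drain(struct req_cache *c)
{
	while (c->count) {
		free(c->slot[c->head]);
		c->head = (c->head + 1) % REQ_CACHE_MAX;
		c->count--;
	}
}

int main(void)
{
	struct req_cache cache = { .head = 0, .tail = 0, .count = 0 };
	void *ctx = req_ctx_get(&cache, 128);	/* cache empty: malloc */

	req_ctx_put(&cache, ctx);		/* parked on the ring */
	ctx = req_ctx_get(&cache, 128);		/* same buffer comes back */
	req_ctx_put(&cache, ctx);
	req_cache_drain(&cache);
	return 0;
}

esp_destroy() in the patch drains the same ring before freeing the esp_data, much as req_cache_drain() does here.
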