Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig                        9
-rw-r--r--  net/core/Makefile                  1
-rw-r--r--  net/core/dev.c                   101
-rw-r--r--  net/core/macsec.c                445
-rw-r--r--  net/core/skbuff.c                227
-rw-r--r--  net/ipv4/Kconfig                  13
-rw-r--r--  net/ipv4/ah4.c                   884
-rw-r--r--  net/ipv4/esp4.c                  861
-rw-r--r--  net/ipv4/tcp.c                     9
-rw-r--r--  net/ipv4/xfrm4_mode_transport.c   19
10 files changed, 2426 insertions(+), 143 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb76..649ee46c8f5 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -79,6 +79,15 @@ source "net/netlabel/Kconfig"
endif # if INET
+config NET_MACSEC
+ bool "IP: MACSEC"
+ select CRYPTO
+ select CRYPTO_GCM
+ ---help---
+	  Support for MACsec (IEEE 802.1AE MAC Security), protecting
+	  Ethernet frames with the AES-GCM AEAD transform.
+
+ If unsure, say Y.
+
config NETWORK_SECMARK
bool "Security Marking"
help
diff --git a/net/core/Makefile b/net/core/Makefile
index 796f46eece5..618d0f3416e 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o
+obj-$(CONFIG_NET_MACSEC) += macsec.o
obj-$(CONFIG_XFRM) += flow.o
obj-y += net-sysfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 74d0ccef22d..9b2e671c7b7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -104,6 +104,7 @@
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
+#include <net/macsec.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
@@ -1721,6 +1722,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
+#ifdef CONFIG_NET_MACSEC
+ if (netdev_macsec_priv(dev)) {
+ rc = dev->macsec_output_hw(skb, dev);
+ if (rc == -EINPROGRESS)
+ return 0;
+ }
+#endif
rc = ops->ndo_start_xmit(skb, dev);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
@@ -2350,7 +2358,21 @@ ncls:
skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
-
+
+#ifdef CONFIG_NET_MACSEC
+ if (macsec_type_trans(skb) == ETH_P_MACSEC) {
+ if (skb->dev->macsec_priv) {
+ ret = skb->dev->macsec_input_hw(skb);
+ if (ret == -EINPROGRESS) {
+ ret = 0;
+ goto out;
+ }
+ }
+ kfree_skb(skb);
+ ret = NET_RX_DROP;
+ goto out;
+ }
+#endif
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
@@ -2379,6 +2401,55 @@ out:
}
EXPORT_SYMBOL(netif_receive_skb);
+int macsec_netif_receive_skb(struct sk_buff *skb, __be16 type)
+{
+ struct packet_type *ptype, *pt_prev;
+ struct net_device *orig_dev;
+ struct net_device *null_or_orig;
+ int ret = NET_RX_DROP;
+
+ pt_prev = NULL;
+ null_or_orig = NULL;
+ orig_dev = skb->dev;
+ if (orig_dev->master) {
+ printk("Master is Different\n");
+ if (skb_bond_should_drop(skb))
+ null_or_orig = orig_dev; /* deliver only exact match */
+ else
+ skb->dev = orig_dev->master;
+ }
+
+ list_for_each_entry_rcu(ptype,
+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
+ if (ptype->type == type &&
+ (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ }
+ pt_prev = ptype;
+ }
+ }
+ if (pt_prev) {
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ } else {
+		if (skb_shinfo(skb)->nr_frags)
+			printk(KERN_ERR "skb unexpectedly has frags\n");
+ kfree_skb(skb);
+ ret = NET_RX_DROP;
+ }
+
+	return ret;
+}
+
/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
{
@@ -4328,6 +4399,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
int err;
struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
const struct net_device_ops *ops;
+#ifdef CONFIG_NET_MACSEC
+	void *mac_priv;
+#endif
if (!dev)
return -ENODEV;
@@ -4392,6 +4464,31 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
ifr->ifr_newname[IFNAMSIZ-1] = '\0';
return dev_change_name(dev, ifr->ifr_newname);
+	case SIOCSETMACSEC:
+#ifdef CONFIG_NET_MACSEC
+		mac_priv = netdev_macsec_priv(dev);
+		if (mac_priv) {
+			printk(KERN_INFO "MACsec session already set\n");
+			return -EEXIST;
+		}
+		err = macsec_init_state(dev);
+		if (!err)
+			/* reserve room for the 8-byte MACsec SecTAG */
+			dev->hard_header_len = ETH_HLEN + 8;
+		return err;
+#else
+		return -EINVAL;
+#endif
+
+ case SIOCUNSETMACSEC:
+#ifdef CONFIG_NET_MACSEC
+ macsec_destroy(dev);
+ dev->hard_header_len = ETH_HLEN;
+ return 0;
+#else
+ return -EINVAL;
+#endif
+
/*
* Unknown or private ioctl
*/
@@ -4550,6 +4647,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSIFFLAGS:
case SIOCSIFMETRIC:
case SIOCSIFMTU:
+ case SIOCSETMACSEC:
+ case SIOCUNSETMACSEC:
case SIOCSIFMAP:
case SIOCSIFHWADDR:
case SIOCSIFSLAVE:
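
For reference, a minimal userspace sketch of driving the two new ioctls added to dev_ifsioc() above. This is an illustration, not part of the patch: the real SIOCSETMACSEC/SIOCUNSETMACSEC values come from the patched kernel headers (outside this net/ diffstat), so clearly marked placeholder values are defined here, the interface name is a placeholder, and CAP_NET_ADMIN is required since both ioctls sit in dev_ioctl()'s privileged list.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>

	#ifndef SIOCSETMACSEC
	/* Placeholder values for illustration only; the real ones are
	 * defined in the patched kernel headers, not in this diff. */
	#define SIOCSETMACSEC	0x89F0
	#define SIOCUNSETMACSEC	0x89F1
	#endif

	int main(void)
	{
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder device */

		/* Establish a MACsec session (dev_ifsioc() returns -EEXIST if one
		 * is already set, and grows hard_header_len by 8 on success). */
		if (ioctl(fd, SIOCSETMACSEC, &ifr) < 0)
			perror("SIOCSETMACSEC");

		/* ... traffic now passes through dev->macsec_output_hw() /
		 * dev->macsec_input_hw() ... */

		if (ioctl(fd, SIOCUNSETMACSEC, &ifr) < 0)
			perror("SIOCUNSETMACSEC");
		close(fd);
		return 0;
	}
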
diff --git a/net/core/macsec.c b/net/core/macsec.c
new file mode 100644
index 00000000000..7c3984de39b
--- /dev/null
+++ b/net/core/macsec.c
@@ -0,0 +1,445 @@
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>	/* get_user() for the proc write handler */
+#include <net/ip.h>
+#include <net/macsec.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+
+#define MACSEC_SKB_CB(__skb) ((struct macsec_skb_cb *)&((__skb)->cb[0]))
+
+int create_cnt = 0;
+u32 delete_cnt = 0;
+u32 create_opt_cnt = 0;
+int delete_opt_cnt = 0;
+int create_force_cnt = 0;
+int macsec_output = 0;
+int macsec_input = 0;
+
+static int macsec_req_ctx_size(struct crypto_aead *aead, int sg_size)
+{
+ unsigned int len = 0;
+ len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
+
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * (sg_size);
+
+ return len;
+}
+
+static void *macsec_alloc_req_ctx(struct macsec_skb_cb *macsec_skb,
+ struct crypto_aead *aead,
+ int nfrags)
+{
+ void *ctx_data;
+ unsigned int len;
+ struct macsec_dev_ctx *ctx = macsec_skb->ctx;
+
+#if CONFIG_INET_MACSEC_NR_REQ_CACHE > 0
+ if (nfrags <= MACSEC_NFRAGS_CACHE) {
+ macsec_skb->flags |= 0x01;
+ if (atomic_read(&ctx->req_cache_cnt)) {
+ ctx_data = ctx->req_cache[ctx->req_cache_head];
+ ctx->req_cache_head = (ctx->req_cache_head + 1) %
+ MACSEC_REQ_CACHE_MAX;
+ atomic_dec(&ctx->req_cache_cnt);
+ create_opt_cnt++;
+ return ctx_data;
+ }
+ create_force_cnt++;
+ len = ctx->req_cache_size +
+ sizeof(struct scatterlist) * MACSEC_NFRAGS_CACHE;
+ ctx_data = kmalloc(len, GFP_ATOMIC);
+ } else {
+ create_cnt++;
+ macsec_skb->flags &= ~0x01;
+ len = ctx->req_cache_size +
+ sizeof(struct scatterlist) * nfrags;
+ ctx_data = kmalloc(len, GFP_ATOMIC);
+ }
+#else
+ len = ctx->req_cache_size +
+ sizeof(struct scatterlist) * nfrags;
+ ctx_data = kmalloc(len, GFP_ATOMIC);
+#endif
+ return ctx_data;
+}
+u32 glb_free_req_ctx_out = 0;
+u32 glb_free_req_ctx_in = 0;
+static void macsec_free_req_ctx(struct macsec_skb_cb *macsec_skb)
+{
+#if CONFIG_INET_MACSEC_NR_REQ_CACHE > 0
+ struct macsec_dev_ctx *ctx = macsec_skb->ctx;
+
+ if (macsec_skb->flags & 0x01) {
+ if (atomic_read(&ctx->req_cache_cnt) < MACSEC_REQ_CACHE_MAX) {
+ ctx->req_cache[ctx->req_cache_tail] = macsec_skb->req_ctx;
+ ctx->req_cache_tail = (ctx->req_cache_tail + 1) %
+ MACSEC_REQ_CACHE_MAX;
+ atomic_inc(&ctx->req_cache_cnt);
+ delete_opt_cnt++;
+ return;
+ }
+ }
+#endif
+ delete_cnt++;
+ kfree(macsec_skb->req_ctx);
+}
+
+static inline struct scatterlist *macsec_req_sg(struct crypto_aead *aead,
+ struct aead_request *req)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) (req + 1) +
+ crypto_aead_reqsize(aead), __alignof__(struct scatterlist));
+}
+
+__be16 macsec_type_trans(struct sk_buff *skb)
+{
+ struct macsec_ethhdr *eth;
+ eth = (struct macsec_ethhdr *)(skb->data - ETH_HLEN);
+ return eth->hdr.macsec_type;
+}
+
+int macsec_init_aead(struct macsec_dev_ctx *mdata)
+{
+ struct crypto_aead *aead;
+ int err;
+ char *alg_name = "macsec(gcm)";
+ char key[32] = { 0x88, 0xc5, 0x12, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x52, 0x05, 0x20, 0x9f, 0xe8, 0x6b, 0xf8,
+ 0x8e, 0x7f, 0xa3, 0xaa, 0x77, 0x89, 0x58, 0xd2,
+ 0x50, 0x61, 0x75, 0x72, 0x81, 0x39, 0x7f, 0xcc};
+ int key_len = 32;
+
+ aead = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aead)) {
+ printk("Failed to create aead transform for macsec(gcm)\n");
+ err = PTR_ERR(aead);
+ goto error;
+ }
+
+ mdata->aead = aead;
+
+ err = crypto_aead_setkey(aead, key, key_len);
+ if (err) {
+ printk("Failed to set key for macsec(gcm)\n");
+ goto error;
+ }
+
+	err = crypto_aead_setauthsize(aead, 24);
+	if (err) {
+		printk(KERN_WARNING "Failed to set authsize for macsec(gcm)\n");
+		err = 0;	/* deliberately non-fatal: keep the transform's default authsize */
+	}
+error:
+ return err;
+}
+
+static void macsec_output_done_hw(struct crypto_async_request *base, int err)
+{
+ int ret;
+ struct sk_buff *skb = base->data;
+ struct net_device *dev = skb->dev;
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (err < 0) {
+		macsec_free_req_ctx(MACSEC_SKB_CB(skb));
+		kfree_skb(skb);	/* nothing else owns the skb on the async error path */
+		return;
+	}
+ glb_free_req_ctx_out++;
+ macsec_free_req_ctx(MACSEC_SKB_CB(skb));
+ ret = ops->ndo_start_xmit(skb, dev);
+}
+
+int macsec_output_hw(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct macsec_dev_ctx *data;
+ struct crypto_aead *aead;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ struct scatterlist *dsg;
+ struct sk_buff *trailer;
+ void *macsec_req;
+ int clen;
+ int alen;
+ int nfrags;
+
+ err = -ENOMEM;
+
+ data = netdev_macsec_priv(dev);
+ aead = data->aead;
+	/* fixed 16-byte ICV; the transform's authsize is not consulted here */
+	alen = 16;
+
+ if ((err = skb_cow_data(skb, alen /* + 8 */, &trailer)) < 0)
+ goto error;
+ nfrags = err;
+
+ MACSEC_SKB_CB(skb)->ctx = data;
+ macsec_req = macsec_alloc_req_ctx(MACSEC_SKB_CB(skb), aead, nfrags * 2);
+ if (!macsec_req)
+ goto error;
+ req = (struct aead_request*) macsec_req;
+
+ aead_request_set_tfm(req, aead);
+ sg = macsec_req_sg(aead, req);
+ dsg = sg + nfrags;
+
+	/* Setup SG (source: frame as queued for transmit) */
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, 0, skb->len);
+
+	clen = skb->len;
+	pskb_put(skb, trailer, alen);
+	skb_push(skb, 8);	/* room for the 8-byte SecTAG */
+	sg_init_table(dsg, nfrags);
+	skb_to_sgvec(skb, dsg, 0, skb->len);
+
+ MACSEC_SKB_CB(skb)->req_ctx = macsec_req;
+
+ aead_request_set_callback(req, 0, macsec_output_done_hw, skb);
+ aead_request_set_crypt(req, sg, dsg, clen, NULL);
+	macsec_output++;
+ err = crypto_aead_encrypt(req);
+
+ if (err == -EINPROGRESS)
+ goto error;
+
+ if (err == -EAGAIN || err == -EBUSY) {
+ macsec_free_req_ctx(MACSEC_SKB_CB(skb));
+ err = NET_XMIT_DROP;
+ }
+
+error:
+ return err;
+
+}
+
+void macsec_done_input_hw(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+	int hlen = 22;	/* ETH header len + MACsec Security Tag (TCI + PN) */
+ int ret;
+ struct macsec_ethhdr *eth;
+
+ skb_reset_mac_header(skb);
+ eth = (struct macsec_ethhdr *)skb_mac_header(skb);
+ skb->protocol = eth->h_proto;
+
+ pskb_trim(skb, skb->len - 16 /* icv */);
+ __skb_pull(skb, hlen);
+ skb_reset_network_header(skb);
+ skb->transport_header = skb->network_header;
+
+ glb_free_req_ctx_in++;
+ macsec_free_req_ctx(MACSEC_SKB_CB(skb));
+ ret = macsec_netif_receive_skb(skb, skb->protocol);
+
+}
+
+int macsec_input_hw(struct sk_buff *skb)
+{
+ struct macsec_dev_ctx *data;
+ struct crypto_aead *aead;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ struct scatterlist *dsg;
+ struct sk_buff *trailer;
+ void *macsec_req;
+ int clen;
+ int nfrags;
+ int eth_len = ETH_HLEN;
+ int err = -EINVAL;
+ int src_len;
+
+ data = netdev_macsec_priv(skb->dev);
+ aead = data->aead;
+
+	if (!aead)
+ goto error;
+
+ if (!pskb_may_pull(skb, eth_len))
+ goto error;
+
+ if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ goto error;
+ nfrags = err;
+
+ err = -ENOMEM;
+
+ MACSEC_SKB_CB(skb)->ctx = data;
+ macsec_req = macsec_alloc_req_ctx(MACSEC_SKB_CB(skb), aead, nfrags * 2);
+ if (!macsec_req)
+ goto error;
+ req = (struct aead_request*) macsec_req;
+ aead_request_set_tfm(req, aead);
+ sg = macsec_req_sg(aead, req);
+ dsg = sg + nfrags;
+
+ /* Setup SG */
+ sg_init_table(sg, nfrags);
+ skb_push(skb, eth_len);
+ clen = skb->len;
+ skb_to_sgvec(skb, sg, 0, clen);
+ src_len = clen;
+
+	sg_init_table(dsg, nfrags);
+	clen -= 16;	/* strip the ICV */
+	clen -= 8;	/* strip the SecTAG */
+	skb_to_sgvec(skb, dsg, 0, clen);
+ MACSEC_SKB_CB(skb)->req_ctx = macsec_req;
+
+ aead_request_set_callback(req, 0, macsec_done_input_hw, skb);
+ aead_request_set_crypt(req, sg, dsg, src_len, NULL);
+
+ err = crypto_aead_decrypt(req);
+
+ if (err == -EINPROGRESS) {
+ macsec_input++;
+ goto error;
+ }
+ if (err == -EBUSY || err == -EAGAIN) {
+ macsec_free_req_ctx(MACSEC_SKB_CB(skb));
+ err = NET_XMIT_DROP;
+ }
+
+error:
+ return err;
+
+}
+
+static ssize_t macsec_console_driver_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char c;
+
+	/* buf is a userspace pointer: copy the first byte before using it */
+	if (!count || get_user(c, buf))
+		return -EFAULT;
+	if (c == '1') {
+		printk(KERN_INFO "MACsec create/delete stats: "
+			"create_cnt = %d, delete_cnt = %u, "
+			"create_opt_cnt = %u, delete_opt_cnt = %d, create_force_cnt = %d, "
+			"glb_free_req_ctx_out = %u, glb_free_req_ctx_in = %u, "
+			"macsec_input = %d, macsec_output = %d\n",
+			create_cnt, delete_cnt, create_opt_cnt,
+			delete_opt_cnt, create_force_cnt, glb_free_req_ctx_out,
+			glb_free_req_ctx_in, macsec_input, macsec_output);
+	}
+	return count;
+}
+
+static int macsec_console_driver_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int macsec_console_driver_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations macsec_console_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = macsec_console_driver_open,
+ .release = macsec_console_driver_release,
+ .write = macsec_console_driver_write,
+};
+
+#define MACSEC_CONSOLE_DRIVER_NAME "macsec"
+int macsec_console_module_init(void)
+{
+ struct proc_dir_entry *entry;
+
+	entry = create_proc_entry(MACSEC_CONSOLE_DRIVER_NAME, 0, NULL);
+	if (entry == NULL) {
+		printk(KERN_ERR "MACsec proc entry creation failed\n");
+		return -ENOMEM;
+	}
+
+	entry->proc_fops = &macsec_console_driver_fops;
+	printk(KERN_INFO "MACsec proc interface initialized\n");
+	return 0;
+}
+
+void macsec_console_module_exit(void)
+{
+ remove_proc_entry(MACSEC_CONSOLE_DRIVER_NAME, NULL);
+}
+
+void macsec_destroy(struct net_device *dev)
+{
+ struct macsec_dev_ctx *macdata = dev->macsec_priv;
+
+ if (!macdata)
+ return;
+
+ crypto_free_aead(macdata->aead);
+ /* Delete request cache */
+ while ((atomic_dec_return(&macdata->req_cache_cnt)) > 0) {
+ kfree(macdata->req_cache[macdata->req_cache_head]);
+ macdata->req_cache_head = (macdata->req_cache_head + 1) %
+ MACSEC_REQ_CACHE_MAX;
+ }
+ dev->macsec_priv = NULL;
+ dev->macsec_output_hw = NULL;
+ dev->macsec_input_hw = NULL;
+
+ kfree(macdata);
+ printk("Macsec Session Destroyed\n");
+}
+
+int macsec_init_state(struct net_device *dev)
+{
+ struct macsec_dev_ctx *macdata;
+ int err;
+
+ macdata = kzalloc(sizeof(*macdata), GFP_KERNEL);
+ if (macdata == NULL)
+ return -ENOMEM;
+
+ dev->macsec_priv = macdata;
+	dev->macsec_output_hw = macsec_output_hw;
+ dev->macsec_input_hw = macsec_input_hw;
+
+	err = macsec_init_aead(macdata);
+	if (err) {
+		/* roll back the partially initialized state */
+		dev->macsec_priv = NULL;
+		dev->macsec_output_hw = NULL;
+		dev->macsec_input_hw = NULL;
+		kfree(macdata);
+		goto out;
+	}
+
+#if CONFIG_INET_MACSEC_NR_REQ_CACHE > 0
+	atomic_set(&macdata->req_cache_cnt, 0);
+	macdata->req_cache_head = 0;
+	macdata->req_cache_tail = 0;
+#endif
+	macdata->req_cache_size = macsec_req_ctx_size(macdata->aead, 0);
+	printk(KERN_INFO "MACsec session established\n");
+out:
+	return err;
+
+}
+
+static int __init macsec_init(void)
+{
+ int ret;
+	ret = macsec_console_module_init();
+	if (ret) {
+		printk(KERN_ERR "MACsec proc driver could not initialize\n");
+		return ret;
+	}
+	printk(KERN_INFO "Registered MACsec interface\n");
+ return 0;
+}
+
+static void __exit macsec_fini(void)
+{
+ macsec_console_module_exit();
+ printk("Unregistered Macsec Interface\n");
+}
+
+module_init(macsec_init);
+module_exit(macsec_fini);
+MODULE_LICENSE("GPL");
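
The request-context cache implemented by macsec_alloc_req_ctx()/macsec_free_req_ctx() above is, at its core, a fixed-size recycle ring that lets the per-packet hot path skip kmalloc(GFP_ATOMIC) for packets with few fragments. A minimal userspace sketch of the same scheme (illustrative names and sizes; the patch uses MACSEC_REQ_CACHE_MAX and an atomic_t counter):

	#include <stdlib.h>

	#define RING_MAX 16

	struct req_ring {
		void *slot[RING_MAX];
		int head;	/* next cached context to hand out */
		int tail;	/* next free slot to refill */
		int cnt;	/* cached contexts (atomic_t in the patch) */
	};

	static void *ring_get(struct req_ring *r, size_t len)
	{
		if (r->cnt > 0) {
			void *ctx = r->slot[r->head];

			r->head = (r->head + 1) % RING_MAX;
			r->cnt--;
			return ctx;	/* cache hit: no allocation */
		}
		return malloc(len);	/* kmalloc(..., GFP_ATOMIC) in the patch */
	}

	static void ring_put(struct req_ring *r, void *ctx)
	{
		if (r->cnt < RING_MAX) {
			r->slot[r->tail] = ctx;
			r->tail = (r->tail + 1) % RING_MAX;
			r->cnt++;	/* recycle instead of freeing */
			return;
		}
		free(ctx);		/* ring full: kfree() in the patch */
	}

As in the patch, head/tail updates are unlocked and only the count is atomic, so the scheme assumes allocation and free never race on the same ring. The counters macsec.c keeps around this cache can be dumped at runtime with: echo 1 > /proc/macsec
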
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ec85681a7dd..f5b46f6a970 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,9 +73,25 @@
static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+
+static void sock_spd_buf_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+ struct sk_buff *skb = (struct sk_buff *)spd->partial[i].private;
+
+ kfree_skb(skb);
+}
+
+
static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
+ struct sk_buff *skb = (struct sk_buff *) buf->private;
+
+ kfree_skb(skb);
+}
+static void sock_pipe_buf_release_1(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
put_page(buf->page);
}
@@ -1374,7 +1390,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
put_page(spd->pages[i]);
}
-
+#if 0
static inline struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
struct sk_buff *skb, struct sock *sk)
@@ -1488,7 +1504,6 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
return 0;
}
-
/*
* Map linear and fragment data from the skb to spd. It reports failure if the
* pipe is full or if we already spliced the requested length.
@@ -1521,7 +1536,6 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
return 0;
}
-
/*
* Map data from the skb to a pipe. Should handle both the linear part,
* the fragments, and the frag list. It does NOT handle frag lists within
@@ -1539,7 +1553,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
.partial = partial,
.flags = flags,
.ops = &sock_pipe_buf_ops,
- .spd_release = sock_spd_release,
+ .spd_release = sock_spd_buf_release,
};
struct sk_buff *frag_iter;
struct sock *sk = skb->sk;
@@ -1584,7 +1598,212 @@ done:
return 0;
}
+#else
+/*
+ * Fill page/offset/length into spd, if it can hold more pages.
+ */
+static inline int spd_fill_page_1(struct splice_pipe_desc *spd, struct page *page,
+ unsigned int len, unsigned int offset,
+ struct sk_buff *skb)
+{
+ if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+ return 1;
+
+	/* no get_page() here: the skb_get() reference below keeps the page alive */
+ spd->pages[spd->nr_pages] = page;
+ spd->partial[spd->nr_pages].len = len;
+ spd->partial[spd->nr_pages].offset = offset;
+ spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
+ spd->nr_pages++;
+ return 0;
+}
+/*
+ * Map linear and fragment data from the skb to spd. Returns number of
+ * pages mapped.
+ */
+static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
+ unsigned int *total_len,
+ struct splice_pipe_desc *spd,
+ struct sock *sk)
+{
+ unsigned int nr_pages = spd->nr_pages;
+ unsigned int poff, plen, len, toff, tlen;
+ int headlen, seg;
+
+ toff = *offset;
+ tlen = *total_len;
+ if (!tlen)
+ goto err;
+
+ /*
+ * if the offset is greater than the linear part, go directly to
+ * the fragments.
+ */
+ headlen = skb_headlen(skb);
+ if (toff >= headlen) {
+ toff -= headlen;
+ goto map_frag;
+ }
+
+ /*
+ * first map the linear region into the pages/partial map, skipping
+ * any potential initial offset.
+ */
+ len = 0;
+ while (len < headlen) {
+ void *p = skb->data + len;
+
+ poff = (unsigned long) p & (PAGE_SIZE - 1);
+ plen = min_t(unsigned int, headlen - len, PAGE_SIZE - poff);
+ len += plen;
+
+ if (toff) {
+ if (plen <= toff) {
+ toff -= plen;
+ continue;
+ }
+ plen -= toff;
+ poff += toff;
+ toff = 0;
+ }
+
+ plen = min(plen, tlen);
+ if (!plen)
+ break;
+
+ /*
+ * just jump directly to update and return, no point
+ * in going over fragments when the output is full.
+ */
+ if (spd_fill_page_1(spd, virt_to_page(p), plen, poff, skb))
+ goto done;
+
+ tlen -= plen;
+ }
+
+ /*
+ * then map the fragments
+ */
+map_frag:
+ for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
+ const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+
+ plen = f->size;
+ poff = f->page_offset;
+
+ if (toff) {
+ if (plen <= toff) {
+ toff -= plen;
+ continue;
+ }
+ plen -= toff;
+ poff += toff;
+ toff = 0;
+ }
+
+ plen = min(plen, tlen);
+ if (!plen)
+ break;
+
+ if (spd_fill_page_1(spd, f->page, plen, poff, skb))
+ break;
+
+ tlen -= plen;
+ }
+
+done:
+ if (spd->nr_pages - nr_pages) {
+ *offset = 0;
+ *total_len = tlen;
+ return 0;
+ }
+err:
+ return 1;
+}
+
+/*
+ * Map data from the skb to a pipe. Should handle both the linear part,
+ * the fragments, and the frag list. It does NOT handle frag lists within
+ * the frag list, if such a thing exists. We'd probably need to recurse to
+ * handle that cleanly.
+ */
+int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+ struct pipe_inode_info *pipe, unsigned int tlen,
+ unsigned int flags)
+{
+ struct partial_page partial[PIPE_BUFFERS];
+ struct page *pages[PIPE_BUFFERS];
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
+ .flags = flags,
+ .ops = &sock_pipe_buf_ops,
+ .spd_release = sock_spd_buf_release,
+ };
+ struct sock *sk = __skb->sk;
+#if 1
+ struct sk_buff *skb;
+ /*
+ * I'd love to avoid the clone here, but tcp_read_sock()
+	 * ignores reference counts and unconditionally kills the sk_buff
+ * on return from the actor.
+ */
+ skb = skb_clone(__skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
+#endif
+ /*
+ * __skb_splice_bits() only fails if the output has no room left,
+ * so no point in going over the frag_list for the error case.
+ */
+ if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+ goto done;
+ else if (!tlen)
+ goto done;
+
+ /*
+ * now see if we have a frag_list to map
+ */
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list && tlen; list = list->next) {
+ if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
+ break;
+ }
+ }
+
+done:
+#if 1
+ /*
+ * drop our reference to the clone, the pipe consumption will
+ * drop the rest.
+ */
+ kfree_skb(skb);
+#endif
+ if (spd.nr_pages) {
+ int ret;
+
+ /*
+ * Drop the socket lock, otherwise we have reverse
+ * locking dependencies between sk_lock and i_mutex
+ * here as compared to sendfile(). We enter here
+ * with the socket lock held, and splice_to_pipe() will
+ * grab the pipe inode lock. For sendfile() emulation,
+ * we call into ->sendpage() with the i_mutex lock held
+ * and networking will grab the socket lock.
+ */
+ release_sock(sk);
+ ret = splice_to_pipe(pipe, &spd);
+ lock_sock(sk);
+ return ret;
+ }
+
+ return 0;
+}
+
+#endif
/**
* skb_store_bits - store bits from kernel buffer to skb
* @skb: destination buffer
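
The rewritten skb_splice_bits() above changes the page-lifetime rule: spd_fill_page_1() no longer takes a page reference (the get_page() call is gone) and instead stores skb_get() of the clone in each partial[i].private, which sock_pipe_buf_release()/sock_spd_buf_release() later drop with kfree_skb(). A minimal sketch of that reference pairing, with skb->users modeled as a plain counter (illustration only, not part of the patch):

	#include <stdio.h>

	struct fake_skb { int users; };

	static struct fake_skb *skb_get_(struct fake_skb *s) { s->users++; return s; }

	static void kfree_skb_(struct fake_skb *s)
	{
		if (--s->users == 0)
			printf("skb freed: last pipe buffer consumed\n");
	}

	int main(void)
	{
		struct fake_skb clone = { .users = 1 };	/* skb_clone() in skb_splice_bits() */
		struct fake_skb *pipe_buf[3];
		int i;

		for (i = 0; i < 3; i++)			/* spd_fill_page_1(): one ref per page */
			pipe_buf[i] = skb_get_(&clone);

		kfree_skb_(&clone);			/* the kfree_skb(skb) at done: */

		for (i = 0; i < 3; i++)			/* per-buffer ->release() callbacks */
			kfree_skb_(pipe_buf[i]);
		return 0;
	}
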
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035e..83a930e0ac9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -351,6 +351,19 @@ config INET_ESP
If unsure, say Y.
+config INET_ESP_NR_REQ_CACHE
+ int "Number of ESP request cache per connection"
+ range 0 256
+ depends on INET_ESP
+ default "0"
+ ---help---
+	  Specify the number of ESP request crypto objects cached per
+	  connection. For ESP transport and tunnel modes, the request
+	  crypto object can be recycled through this cache instead of
+	  being returned to the memory pool after each packet is
+	  processed. This improves performance on slow processors at
+	  the price of extra memory. The value should match the
+	  hardware offload descriptor ring size.
+
config INET_IPCOMP
tristate "IP: IPComp transformation"
select INET_XFRM_TUNNEL
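
A worked sizing example for the new option (illustrative numbers only, not taken from this patch): if esp_req_ctx_size() comes to roughly 512 bytes for the configured AEAD and INET_ESP_NR_REQ_CACHE is set to 64 to match a 64-entry hardware descriptor ring, each connection may pin up to 64 * 512 B = 32 KiB of GFP_ATOMIC memory in its recycle cache. With the default of 0, the cache code is compiled out entirely (the "#if CONFIG_INET_ESP_NR_REQ_CACHE > 0" guards in esp4.c below).
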
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 5c662703eb1..71150bff012 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -8,13 +8,152 @@
#include <linux/spinlock.h>
#include <net/icmp.h>
#include <net/protocol.h>
+#include <crypto/authenc.h>
+#include <linux/highmem.h>
+#include <crypto/hash.h>
+//#define DEBUG_AH
+#ifdef DEBUG_AH
+# define AH_DUMP_PKT print_hex_dump
+#else
+# define AH_DUMP_PKT(arg...)
+#endif
+
+/**
+ * @brief SKB private data for AH stored in skb cb field
+ *
+ * @tmp_req - temporary ahash/aead request
+ * @icv_trunc_len - AH ICV length for software AH
+ * @nh - Next header for hardware offload AH
+ *
+ */
+struct ah_skb_cb {
+ void *tmp_req;
+ u16 icv_trunc_len;
+ u8 nh;
+};
+
+#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
+
+/**
+ * @brief AH work buffer (union) for software AH
+ * @iph - IP header access
+ * @buf - byte address access
+ * @note Used to save IP header and IP options
+ *
+ */
+union ah_tmp_iph {
+ struct iphdr iph;
+ char buf[60];
+};
+
+#define AH_WORK_BUF_MAX_LEN sizeof(union ah_tmp_iph)
+
+/*
+ * Allocate an ahash request structure with extra space for structure
+ * ah_tmp_iph (scratch pad), ICV (saved input ICV), working ICV
+ * (space for hash algorithm to store ICV), and SG.
+ *
+ */
+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags)
+{
+ unsigned int len;
+
+ len = AH_WORK_BUF_MAX_LEN;
+ len += MAX_AH_AUTH_LEN;
+ len += crypto_ahash_digestsize(ahash);
+ len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * nfrags;
+
+ return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline void ah_free_tmp(void *tmp)
+{
+ kfree(tmp);
+}
+
+static inline union ah_tmp_iph *ah_tmp_work_buf(void *tmp)
+{
+ return tmp;
+}
+
+static inline u8 *ah_tmp_icv(union ah_tmp_iph *tmp)
+{
+ return (u8 *) (tmp + 1);
+}
+
+static inline u8 *ah_tmp_work_icv(u8 *tmp)
+{
+ return tmp + MAX_AH_AUTH_LEN;
+}
+
+static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
+ u8 *tmp)
+{
+ struct ahash_request *req = (struct ahash_request *) (tmp +
+ crypto_ahash_digestsize(ahash));
+ ahash_request_set_tfm(req, ahash);
+ return req;
+}
+
+static inline struct scatterlist *ah_tmp_sg(struct crypto_ahash *ahash,
+ struct ahash_request *req)
+{
+ return (void *) ALIGN((unsigned long) (req + 1) +
+ crypto_ahash_reqsize(ahash),
+ __alignof__(struct scatterlist));
+}
+
+/*
+ * Allocate an aead request structure with extra space for structure
+ * SG.
+ *
+ */
+static void *ah_alloc_aead_tmp(struct crypto_aead *aead, int nfrags)
+{
+ unsigned int len;
+
+ len = sizeof(struct aead_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * nfrags;
+
+ return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline void ah_free_aead_tmp(void *tmp)
+{
+ kfree(tmp);
+}
+
+static inline struct aead_request *ah_tmp_aead_req(struct crypto_aead *aead,
+ void *tmp)
+{
+ struct aead_request *req = (struct aead_request *) tmp;
+ aead_request_set_tfm(req, aead);
+ return req;
+}
+
+static inline struct scatterlist *ah_tmp_aead_sg(struct crypto_aead *aead,
+ struct aead_request *req)
+{
+ return (void *) ALIGN((unsigned long) (req + 1) +
+ crypto_aead_reqsize(aead),
+ __alignof__(struct scatterlist));
+}
+static inline struct scatterlist *ah_tmp_aead_dsg(struct scatterlist *sg,
+ unsigned int nfrags)
+{
+ return (void *) ((unsigned long) sg +
+ sizeof(struct scatterlist) * nfrags);
+
+}
/* Clear mutable options and find final destination to substitute
* into IP header for icv calculation. Options are already checked
* for validity, so paranoia is not required. */
-
-static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
+int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
{
unsigned char * optptr = (unsigned char*)(iph+1);
int l = iph->ihl*4 - sizeof(struct iphdr);
@@ -53,27 +192,133 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
}
return 0;
}
+EXPORT_SYMBOL_GPL(ip_clear_mutable_options);
-static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
+/*******************************************************************************
+ * AH Software Functions
+ *
+ *******************************************************************************
+ */
+static int ah_output_done2(struct sk_buff *skb, int err)
+{
+ void *req_tmp = AH_SKB_CB(skb)->tmp_req;
+ struct iphdr *iph;
+ struct iphdr *top_iph;
+ union ah_tmp_iph *tmp_iph;
+ struct ip_auth_hdr *ah;
+ char *icv;
+ char *work_icv;
+
+ if (err < 0)
+ goto out;
+
+ tmp_iph = ah_tmp_work_buf(req_tmp);
+ icv = ah_tmp_icv(tmp_iph);
+ work_icv = ah_tmp_work_icv(icv);
+ iph = &tmp_iph->iph;
+ top_iph = ip_hdr(skb);
+ ah = ip_auth_hdr(skb);
+
+ /* Set ICV in AH header */
+ memcpy(ah->auth_data, work_icv, AH_SKB_CB(skb)->icv_trunc_len);
+
+	/* Restore the mutable fields */
+ top_iph->tos = iph->tos;
+ top_iph->ttl = iph->ttl;
+ top_iph->frag_off = iph->frag_off;
+ if (top_iph->ihl != 5) {
+ top_iph->daddr = iph->daddr;
+ memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
+ }
+
+ AH_DUMP_PKT(KERN_INFO, "AH output sw done: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
+
+out:
+ kfree(req_tmp);
+ return err;
+}
+
+static void ah_output_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+
+ xfrm_output_resume(skb, ah_output_done2(skb, err));
+}
+
+static int ah_output_sw(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
struct iphdr *iph, *top_iph;
struct ip_auth_hdr *ah;
- struct ah_data *ahp;
- union {
- struct iphdr iph;
- char buf[60];
- } tmp_iph;
+ struct ah_data *ahp;
+ struct ahash_request *areq;
+ struct scatterlist *sg;
+ int nfrags;
+ void *req_tmp = NULL;
+ union ah_tmp_iph *tmp_iph;
+ char *icv;
+ char *work_icv;
+ struct sk_buff *trailer;
+
+ /* SKB transport, network, and mac header pointers are set by
+ transport or tunnel modules.
+
+ Transport Input:
+ -----------------------
+ | IP | Rsvd | Payload |
+ -----------------------
+ ^
+ |
+ skb.data
+
+ Tunnel Input:
+ ----------------------------------------
+ | Outer IP | Rsvd | Inner IP | Payload |
+ ----------------------------------------
+ ^
+ |
+ skb.data
+ */
+
+ AH_DUMP_PKT(KERN_INFO, "AH output sw : ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
skb_push(skb, -skb_network_offset(skb));
+
+ /* Find # of fragments */
+ if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ goto error;
+ nfrags = err;
+
+ /* Allocate temp request */
+ ahp = x->data;
+ req_tmp = ah_alloc_tmp(ahp->utfm.atfm, nfrags);
+ if (!req_tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ AH_SKB_CB(skb)->tmp_req = req_tmp;
+ AH_SKB_CB(skb)->icv_trunc_len = ahp->icv_trunc_len;
+ tmp_iph = ah_tmp_work_buf(req_tmp);
+ icv = ah_tmp_icv(tmp_iph);
+ work_icv = ah_tmp_work_icv(icv);
+ areq = ah_tmp_req(ahp->utfm.atfm, work_icv);
+ sg = ah_tmp_sg(ahp->utfm.atfm, areq);
+
top_iph = ip_hdr(skb);
- iph = &tmp_iph.iph;
+ iph = &tmp_iph->iph;
+ /* Save IP header to compute hash */
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
iph->frag_off = top_iph->frag_off;
-
if (top_iph->ihl != 5) {
+ if ((top_iph->ihl << 2) > AH_WORK_BUF_MAX_LEN) {
+ err = -EINVAL;
+ goto error;
+ }
iph->daddr = top_iph->daddr;
memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
@@ -81,85 +326,198 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
}
+ /* Set AH header */
ah = ip_auth_hdr(skb);
ah->nexthdr = *skb_mac_header(skb);
*skb_mac_header(skb) = IPPROTO_AH;
+	/* Zero the mutable fields for the hash */
top_iph->tos = 0;
top_iph->tot_len = htons(skb->len);
top_iph->frag_off = 0;
top_iph->ttl = 0;
top_iph->check = 0;
- ahp = x->data;
+ /* Set AH fields */
ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
-
ah->reserved = 0;
- ah->spi = x->id.spi;
- ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+ ah->spi = x->id.spi;
+ ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
- spin_lock_bh(&x->lock);
- err = ah_mac_digest(ahp, skb, ah->auth_data);
- memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
- spin_unlock_bh(&x->lock);
+	/* Zero the AH ICV for the hash */
+ memset(ah->auth_data, 0, ahp->icv_trunc_len);
- if (err)
+ /* Setup SG for hash op */
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
+ ahash_request_set_callback(areq, 0, ah_output_done, skb);
+ ahash_request_set_crypt(areq, sg, work_icv, skb->len);
+
+ err = crypto_ahash_digest(areq);
+ if (err == -EINPROGRESS)
+ goto out;
+ if (err < 0)
goto error;
- top_iph->tos = iph->tos;
- top_iph->ttl = iph->ttl;
- top_iph->frag_off = iph->frag_off;
- if (top_iph->ihl != 5) {
- top_iph->daddr = iph->daddr;
- memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
+ return ah_output_done2(skb, err);
+
+error:
+ if (req_tmp)
+ ah_free_tmp(req_tmp);
+out:
+ return err;
+}
+
+static int ah_input_done2(struct sk_buff *skb, int err)
+{
+ void *req_tmp = AH_SKB_CB(skb)->tmp_req;
+ struct iphdr *top_iph;
+ struct ip_auth_hdr *ah;
+ union ah_tmp_iph *tmp_iph;
+ int ah_hlen;
+ int ihl;
+ char *icv;
+ char *work_icv;
+ int nexthdr;
+
+ if (err < 0)
+ goto out;
+
+ tmp_iph = ah_tmp_work_buf(req_tmp);
+ icv = ah_tmp_icv(tmp_iph);
+ work_icv = ah_tmp_work_icv(icv);
+
+ /* Verify ICV */
+ if (memcmp(icv, work_icv, AH_SKB_CB(skb)->icv_trunc_len)) {
+ err = -EBADMSG;
+ goto out;
}
- err = 0;
+ top_iph = ip_hdr(skb);
+ ihl = top_iph->ihl << 2;
+ ah = (struct ip_auth_hdr *) ((u8 *) top_iph + ihl);
+ nexthdr = ah->nexthdr;
+ ah_hlen = (ah->hdrlen + 2) << 2;
+
+ /* Remove AH header */
+ skb->network_header += ah_hlen;
+ memcpy(skb_network_header(skb), tmp_iph->buf, ihl);
+ skb->transport_header = skb->network_header;
+ __skb_pull(skb, ah_hlen + ihl);
+
+ err = nexthdr;
-error:
+ AH_DUMP_PKT(KERN_INFO, "AH input sw done: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
+
+out:
+ kfree(req_tmp);
return err;
}
-static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
+static void ah_input_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+
+ xfrm_input_resume(skb, ah_input_done2(skb, err));
+}
+
+static int ah_input_sw(struct xfrm_state *x, struct sk_buff *skb)
{
int ah_hlen;
int ihl;
int nexthdr;
int err = -EINVAL;
- struct iphdr *iph;
- struct ip_auth_hdr *ah;
- struct ah_data *ahp;
- char work_buf[60];
+ struct iphdr *iph;
+ struct ip_auth_hdr *ah;
+ struct ah_data *ahp;
+ struct sk_buff *trailer;
+ struct ahash_request *areq;
+ struct scatterlist *sg;
+ union ah_tmp_iph *tmp_iph;
+ int nfrags;
+ void *req_tmp = NULL;
+ char *icv;
+ char *work_icv;
+
+ /* SKB transport, network, and mac header pointers are set by
+ transport or tunnel modules.
+
+ Transport Input:
+ -----------------------
+ | IP | AH | Payload |
+ -----------------------
+ ^
+ |
+ skb.data
+
+ Tunnel Input:
+ ----------------------------------------
+ | Outer IP | AH | Inner IP | Payload |
+ ----------------------------------------
+ ^
+ |
+ skb.data
+ */
+
+ AH_DUMP_PKT(KERN_INFO, "AH input sw : ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
if (!pskb_may_pull(skb, sizeof(*ah)))
- goto out;
+ goto error;
- ah = (struct ip_auth_hdr *)skb->data;
+ ah = (struct ip_auth_hdr *)skb->data;
ahp = x->data;
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
- goto out;
+ goto error;
if (!pskb_may_pull(skb, ah_hlen))
- goto out;
+ goto error;
/* We are going to _remove_ AH header to keep sockets happy,
* so... Later this can change. */
if (skb_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
- goto out;
+ goto error;
+
+ /* Find # of fragment */
+ if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ goto error;
+ nfrags = err;
skb->ip_summed = CHECKSUM_NONE;
- ah = (struct ip_auth_hdr *)skb->data;
+ ah = (struct ip_auth_hdr *)skb->data;
iph = ip_hdr(skb);
+ /* Allocate temp ahash request */
+ req_tmp = ah_alloc_tmp(ahp->utfm.atfm, nfrags);
+ if (!req_tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+ AH_SKB_CB(skb)->tmp_req = req_tmp;
+ AH_SKB_CB(skb)->icv_trunc_len = ahp->icv_trunc_len;
+ tmp_iph = ah_tmp_work_buf(req_tmp);
+ icv = ah_tmp_icv(tmp_iph);
+ work_icv = ah_tmp_work_icv(icv);
+ areq = ah_tmp_req(ahp->utfm.atfm, work_icv);
+ sg = ah_tmp_sg(ahp->utfm.atfm, areq);
+
ihl = skb->data - skb_network_header(skb);
- memcpy(work_buf, iph, ihl);
+ if (ihl > AH_WORK_BUF_MAX_LEN) {
+ err = -EBADMSG;
+ goto error;
+ }
+ /* Save IP header for hash computation */
+ memcpy(tmp_iph->buf, iph, ihl);
+
+	/* Zero the mutable fields for the hash op */
iph->ttl = 0;
iph->tos = 0;
iph->frag_off = 0;
@@ -167,43 +525,339 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
if (ihl > sizeof(*iph)) {
__be32 dummy;
if (ip_clear_mutable_options(iph, &dummy))
- goto out;
+ goto error;
}
- spin_lock(&x->lock);
- {
- u8 auth_data[MAX_AH_AUTH_LEN];
+ /* Save ICV */
+ memcpy(icv, ah->auth_data, ahp->icv_trunc_len);
+	/* Zero the ICV for the hash op */
+ memset(ah->auth_data, 0, ahp->icv_trunc_len);
+ /* Add back IP header for SG */
+ skb_push(skb, ihl);
- memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
- skb_push(skb, ihl);
- err = ah_mac_digest(ahp, skb, ah->auth_data);
- if (err)
- goto unlock;
- if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len))
- err = -EBADMSG;
+ /* Setup SG */
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
+ ahash_request_set_callback(areq, 0, ah_input_done, skb);
+ ahash_request_set_crypt(areq, sg, work_icv, skb->len);
+
+ err = crypto_ahash_digest(areq);
+ if (err == -EINPROGRESS)
+ goto out;
+ if (err < 0)
+ goto error;
+
+ return ah_input_done2(skb, err);
+
+error:
+ if (req_tmp)
+ ah_free_tmp(req_tmp);
+out:
+ return err;
+}
+
+/*******************************************************************************
+ * AH HW Offload Functions
+ *
+ *******************************************************************************
+ */
+static int ah_output_done2_hw(struct sk_buff *skb, int err)
+{
+ void *req_tmp = AH_SKB_CB(skb)->tmp_req;
+
+ if (err < 0)
+ goto out;
+
+ AH_DUMP_PKT(KERN_INFO, "AH output hw: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
+
+out:
+ kfree(req_tmp);
+ return err;
+}
+
+static void ah_output_done_hw(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+
+ xfrm_output_resume(skb, ah_output_done2_hw(skb, err));
+}
+
+static int ah_output_hw(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct ah_data *ahp;
+ struct aead_request *areq;
+ struct scatterlist *sg;
+ struct scatterlist *dsg;
+ struct sk_buff *trailer;
+ void *req_tmp = NULL;
+ int err;
+ int nfrags;
+ unsigned int clen;
+
+ /* For AH transport mode, skb.data is at IP header. skb.len
+ includes IP header and payload. skb network header, transport
+ header, and mac headers are updated by transport module code.
+
+ Input:
+ --------------------------------------------
+ | Network Hdr| Transport Hdr| IP | Payload |
+ --------------------------------------------
+ ^
+ |
+ skb.data
+
+ For AH tunnel mode, outer IP header is formed by tunnel module.
+ skb network header, transport header, and mac header are updated
+ by tunnel module code.
+
+ Input:
+ -----------------------------------------------------
+ | Outer IP | Rsvd | inner IP Header | Payload |
+ -----------------------------------------------------
+ ^
+ |
+ skb.data
+ */
+
+ ahp = x->data;
+
+ /* Find # fragment */
+ if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ goto error;
+ nfrags = err;
+
+ /* Allocate temp request */
+ req_tmp = ah_alloc_aead_tmp(ahp->utfm.aeadtfm, 2 * nfrags);
+ if (!req_tmp) {
+ err = -ENOMEM;
+ goto error;
}
-unlock:
- spin_unlock(&x->lock);
- if (err)
+ AH_SKB_CB(skb)->tmp_req = req_tmp;
+ areq = ah_tmp_aead_req(ahp->utfm.aeadtfm, req_tmp);
+ sg = ah_tmp_aead_sg(ahp->utfm.aeadtfm, areq);
+ dsg = ah_tmp_aead_dsg(sg, nfrags);
+ /* Set up SG - data will start at IP (inner) header (skb.data) */
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
+ clen = skb->len;
+ skb_push(skb, -skb_network_offset(skb));
+ skb_to_sgvec(skb, dsg, 0, skb->len);
+ aead_request_set_callback(areq, 0, ah_output_done_hw, skb);
+ aead_request_set_crypt(areq, sg, dsg, clen, NULL);
+
+ /* For AH transport mode, SG is at IP header.
+
+ Input:
+ ----------------------
+ | Rsvd| IP | Payload |
+ ----------------------
+ Rsvd - space reserved for moved IP and added AH
+
+ Output:
+ ---------------------
+ | IP | AH | Payload |
+ ---------------------
+
+ For AH tunnel mode, outer IP header is formed by tunnel module.
+ SG is at inner IP header.
+
+ Input:
+ ----------------------------------------
+ | Outer IP | Rsvd | inner IP | Payload |
+ ----------------------------------------
+ Rsvd - space reserved for added AH
+
+ Output:
+ ----------------------------------------
+ | Outer IP | AH | inner IP | Payload |
+ ----------------------------------------
+
+ */
+ err = crypto_aead_encrypt(areq);
+ if (err == -EINPROGRESS)
goto out;
+ if (err < 0)
+ goto error;
- skb->network_header += ah_hlen;
- memcpy(skb_network_header(skb), work_buf, ihl);
- skb->transport_header = skb->network_header;
- __skb_pull(skb, ah_hlen + ihl);
+ return ah_output_done2_hw(skb, err);
+
+error:
+ if (req_tmp)
+ ah_free_tmp(req_tmp);
+out:
+ return err;
+}
+
+static int ah_input_done2_hw(struct sk_buff *skb, int err)
+{
+ void *req_tmp = AH_SKB_CB(skb)->tmp_req;
- return nexthdr;
+ if (err < 0)
+ goto out;
+
+ err = AH_SKB_CB(skb)->nh;
+
+ AH_DUMP_PKT(KERN_INFO, "AH input hw: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
out:
+ kfree(req_tmp);
return err;
}
+static void ah_input_done_hw(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+
+ xfrm_input_resume(skb, ah_input_done2_hw(skb, err));
+}
+
+static int ah_input_hw(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int ah_hlen;
+ int ihl;
+ int err = -EINVAL;
+ struct ip_auth_hdr *ah;
+ struct ah_data *ahp;
+ struct sk_buff *trailer;
+ struct aead_request *areq;
+ struct scatterlist *sg;
+ struct scatterlist *dsg;
+ int nfrags;
+ void *req_tmp = NULL;
+
+ /* For AH transport/tunnel mode, skb.data is at AH header. skb.len
+ includes payload. skb network header, transport header, and
+ mac headers will be updated by transport module code.
+
+ Transport Input:
+ -------------------------
+ | IP Hdr | AH | Payload |
+ -------------------------
+ ^
+ |
+ skb.data and length start here
+
+ Tunnel Input:
+ ------------------------------------
+ |Outer IP | AH | inner IP | Payload|
+ ------------------------------------
+ ^
+ |
+ skb.data and length start here
+ */
+
+ AH_DUMP_PKT(KERN_INFO, "AH input hw : ", DUMP_PREFIX_ADDRESS,
+ 16, 4, skb->data, skb->len, 1);
+
+ if (skb_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto error;
+
+ /* Find # of fragment */
+ if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ goto error;
+ nfrags = err;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ ihl = skb->data - skb_network_header(skb);
+ ah = (struct ip_auth_hdr *) skb->data;
+ ah_hlen = (ah->hdrlen + 2) << 2;
+ AH_SKB_CB(skb)->nh = ah->nexthdr;
+
+ /* Allocate temp request */
+ ahp = x->data;
+ req_tmp = ah_alloc_aead_tmp(ahp->utfm.aeadtfm, 2 * nfrags);
+ if (!req_tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+ AH_SKB_CB(skb)->tmp_req = req_tmp;
+ areq = ah_tmp_aead_req(ahp->utfm.aeadtfm, req_tmp);
+ sg = ah_tmp_aead_sg(ahp->utfm.aeadtfm, areq);
+ dsg = ah_tmp_aead_dsg(sg, nfrags);
+
+ /* Init SG - data starts at AH header */
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, -ihl, skb->len + ihl);
+ skb->network_header += ah_hlen;
+ skb->transport_header = skb->network_header;
+ __skb_pull(skb, ah_hlen);
+
+ skb_to_sgvec(skb, dsg, -ihl, skb->len + ihl);
+ aead_request_set_callback(areq, 0, ah_input_done_hw, skb);
+ aead_request_set_crypt(areq, sg, dsg, skb->len + ah_hlen + ihl, NULL);
+
+ /* For AH transport/tunnel mode, SG is at IP header.
+
+ Transport Input:
+ ----------------------------
+ | IP Hdr | AH | Payload |
+ ----------------------------
+ IP Hdr - start of SG
+
+ Transport Output:
+ ----------------------------
+ | | IP Hdr | Payload |
+ ----------------------------
+
+ Tunnel Input:
+ -------------------------------------
+ | Outer IP | AH | inner IP | Payload|
+ -------------------------------------
+ Outer IP Hdr - start of SG
+
+ Tunnel Output:
+ -------------------------------------
+ | Outer IP | AH | inner IP | Payload|
+ -------------------------------------
+	   Outer IP and AH are left untouched
+
+ */
+ err = crypto_aead_decrypt(areq);
+ if (err == -EINPROGRESS)
+ goto out;
+
+ if (err < 0)
+ goto error;
+
+	return ah_input_done2_hw(skb, err);
+
+error:
+ if (req_tmp)
+ ah_free_tmp(req_tmp);
+out:
+ return err;
+}
+
+static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+ if ((x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) &&
+ (x->alg_flags & (XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL |
+ XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT)))
+ return ah_output_hw(x, skb);
+ else
+ return ah_output_sw(x, skb);
+}
+
+static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+ if ((x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) &&
+ (x->alg_flags & (XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL |
+ XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT)))
+ return ah_input_hw(x, skb);
+ else
+ return ah_input_sw(x, skb);
+}
+
static void ah4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
- struct iphdr *iph = (struct iphdr *)skb->data;
- struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
+ struct iphdr *iph = (struct iphdr*)skb->data;
+ struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
@@ -220,9 +874,19 @@ static void ah4_err(struct sk_buff *skb, u32 info)
static int ah_init_state(struct xfrm_state *x)
{
- struct ah_data *ahp = NULL;
+ struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
- struct crypto_hash *tfm;
+ struct crypto_ahash *ahashtfm;
+ struct crypto_aead *aeadtfm;
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ char *key;
+ int key_len;
+ int digest_size;
+ struct rtattr *rta;
+ struct ah_param {
+ __be32 spi;
+ __be32 seq;
+ } *param;
if (!x->aalg)
goto error;
@@ -234,40 +898,98 @@ static int ah_init_state(struct xfrm_state *x)
if (ahp == NULL)
return -ENOMEM;
- tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto error;
+ /* Try AH hardware offload first */
+ switch (x->props.mode) {
+ case XFRM_MODE_TUNNEL:
+ snprintf(alg_name, ARRAY_SIZE(alg_name),
+ "tunnel(ah(%s))", x->aalg->alg_name);
+ x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL
+ | XFRM_ALGO_FLAGS_OFFLOAD_AH;
+ break;
+ case XFRM_MODE_TRANSPORT:
+ snprintf(alg_name, ARRAY_SIZE(alg_name),
+ "transport(ah(%s))", x->aalg->alg_name);
+ x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT
+ | XFRM_ALGO_FLAGS_OFFLOAD_AH;
+ break;
+ default:
+ strncpy(alg_name, x->aalg->alg_name, ARRAY_SIZE(alg_name));
+ break;
+ }
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) {
+ aeadtfm = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aeadtfm)) {
+ /* No AH hardware offload, go to software AH */
+ x->alg_flags &= ~(XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL
+ | XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT
+ | XFRM_ALGO_FLAGS_OFFLOAD_AH);
+ aeadtfm = NULL;
+ ahashtfm = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
+ if (IS_ERR(ahashtfm))
+ goto error;
+ ahp->utfm.atfm = ahashtfm;
+ } else {
+ ahashtfm = NULL;
+ ahp->utfm.aeadtfm = aeadtfm;
+ }
+ } else {
+ aeadtfm = NULL;
+ ahashtfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (IS_ERR(ahashtfm))
+ goto error;
+ ahp->utfm.atfm = ahashtfm;
+ }
- ahp->tfm = tfm;
- if (crypto_hash_setkey(tfm, x->aalg->alg_key,
- (x->aalg->alg_key_len + 7) / 8))
- goto error;
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_AH) {
+ /* For AH offload, we must load AH offload parameters
+ via setkey function. */
+ key_len = RTA_SPACE(sizeof(*param)) +
+ ((x->aalg->alg_key_len + 7) / 8);
+		key = kmalloc(key_len, GFP_KERNEL);
+		if (!key)
+			goto error;
+		rta = (void *) key;
+		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+		rta->rta_len = RTA_LENGTH(sizeof(*param));
+		param = RTA_DATA(rta);
+		param->spi = cpu_to_be32(x->id.spi);
+		param->seq = cpu_to_be32(x->replay.oseq);
+		memcpy(key + RTA_SPACE(sizeof(*param)),
+			x->aalg->alg_key,
+			(x->aalg->alg_key_len + 7) / 8);
+		if (crypto_aead_setkey(aeadtfm, key, key_len)) {
+			kfree(key);
+			goto error;
+		}
+		kfree(key);	/* the transform keeps its own copy */
+ digest_size = crypto_aead_tfm(aeadtfm)->__crt_alg->
+ cra_aead.maxauthsize;
+ } else {
+ key_len = (x->aalg->alg_key_len + 7) / 8;
+ key = x->aalg->alg_key;
+ if (crypto_ahash_setkey(ahashtfm, key, key_len))
+ goto error;
+ digest_size = crypto_ahash_digestsize(ahashtfm);
+ }
/*
* Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
- * after a successful crypto_alloc_hash().
+ * after a successful crypto_alloc_ahash().
*/
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
- if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
- crypto_hash_digestsize(tfm)) {
+ if (aalg_desc->uinfo.auth.icv_fullbits/8 != digest_size) {
printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
- x->aalg->alg_name, crypto_hash_digestsize(tfm),
+ x->aalg->alg_name, digest_size,
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
- ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
- if (!ahp->work_icv)
- goto error;
+ /* For AH hardware offload, set ICV size */
+ if (aeadtfm)
+ crypto_aead_setauthsize(aeadtfm, ahp->icv_trunc_len);
x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
ahp->icv_trunc_len);
@@ -279,8 +1001,7 @@ static int ah_init_state(struct xfrm_state *x)
error:
if (ahp) {
- kfree(ahp->work_icv);
- crypto_free_hash(ahp->tfm);
+ crypto_free_ahash(ahp->utfm.atfm);
kfree(ahp);
}
return -EINVAL;
@@ -293,12 +1014,11 @@ static void ah_destroy(struct xfrm_state *x)
if (!ahp)
return;
- kfree(ahp->work_icv);
- crypto_free_hash(ahp->tfm);
+ crypto_free_ahash(ahp->utfm.atfm);
+ ahp->utfm.atfm = NULL;
kfree(ahp);
}
-
static const struct xfrm_type ah_type =
{
.description = "AH4",
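
Both ah_output_sw() and ah_input_sw() above follow the mutable-field rule of RFC 4302: IP header fields that routers may rewrite in flight are saved, zeroed before the ICV is computed over the packet, and restored (output) or compared (input) afterwards. A minimal sketch of the save-and-zero step, limited to the fields the patch itself touches (illustration only; the patch stores the copy in its ah_tmp_iph work buffer):

	#include <linux/ip.h>

	/* Sketch only: mirrors what ah_output_sw() does with its ah_tmp_iph copy. */
	static void ah_save_and_zero_mutable(struct iphdr *top_iph, struct iphdr *saved)
	{
		/* save the originals so the completion path can restore them */
		saved->tos      = top_iph->tos;
		saved->ttl      = top_iph->ttl;
		saved->frag_off = top_iph->frag_off;

		/* zero everything a router may legitimately rewrite in flight */
		top_iph->tos      = 0;
		top_iph->ttl      = 0;
		top_iph->frag_off = 0;
		top_iph->check    = 0;	/* recomputed hop by hop */
	}
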
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 12f7287e902..ffb24aeeaef 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -15,45 +15,127 @@
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
+#include <linux/highmem.h>
+
+//#define DEBUG_ESP
+#ifdef DEBUG_ESP
+# define ESP_DUMP_PKT print_hex_dump
+# define ESP_PRINTK printk
+#else
+# define ESP_DUMP_PKT(arg...)
+# define ESP_PRINTK(arg...)
+#endif
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
- void *tmp;
+ void *req_ctx;
+ struct esp_data *esp;
+ char flags;
};
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+struct esp_offload_param {
+ unsigned short encap_sport;
+ unsigned short encap_dport;
+ unsigned char nh;
+ unsigned char pad_len;
+} __attribute__((packed));
+
+#define ESP_OFFLOAD_INFO_SIZE ALIGN(sizeof(struct esp_offload_param), 4)
+
/*
- * Allocate an AEAD request structure with extra space for SG and IV.
+ * Allocate an AEAD request structure with extra space for SG, IV, and 16
+ * bytes info data for HW offload.
*
* For alignment considerations the IV is placed at the front, followed
* by the request and finally the SG list.
*
* TODO: Use spare space in skb for this where possible.
*/
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+
+static int esp_req_ctx_size(struct crypto_aead *aead, int nfrags)
{
unsigned int len;
- len = crypto_aead_ivsize(aead);
- if (len) {
- len += crypto_aead_alignmask(aead) &
- ~(crypto_tfm_ctx_alignment() - 1);
- len = ALIGN(len, crypto_tfm_ctx_alignment());
- }
-
+ len = ESP_OFFLOAD_INFO_SIZE;
+ len += crypto_aead_ivsize(aead);
+ len += crypto_aead_alignmask(aead) &
+ ~(crypto_tfm_ctx_alignment() - 1);
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+
len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
len = ALIGN(len, __alignof__(struct scatterlist));
len += sizeof(struct scatterlist) * nfrags;
- return kmalloc(len, GFP_ATOMIC);
+ return len;
+}
+
+static void *esp_alloc_req_ctx( struct esp_skb_cb *esp_skb,
+ struct crypto_aead *aead,
+ int nfrags)
+{
+ void *ctx;
+ unsigned int len;
+ struct esp_data *esp = esp_skb->esp;
+
+#if CONFIG_INET_ESP_NR_REQ_CACHE > 0
+ if (nfrags <= ESP_NFRAGS_CACHE) {
+ esp_skb->flags |= 0x01;
+ if (atomic_read(&esp->req_cache_cnt)) {
+ ctx = esp->req_cache[esp->req_cache_head];
+ esp->req_cache_head = (esp->req_cache_head + 1) %
+ ESP_REQ_CACHE_MAX;
+ atomic_dec(&esp->req_cache_cnt);
+ return ctx;
+ }
+ len = esp->req_cache_size +
+ sizeof(struct scatterlist) * ESP_NFRAGS_CACHE;
+ ctx = kmalloc(len, GFP_ATOMIC);
+ } else {
+ esp_skb->flags &= ~0x01;
+ len = esp->req_cache_size +
+ sizeof(struct scatterlist) * nfrags;
+ ctx = kmalloc(len, GFP_ATOMIC);
+ }
+#else
+ len = esp->req_cache_size +
+ sizeof(struct scatterlist) * nfrags;
+ ctx = kmalloc(len, GFP_ATOMIC);
+#endif
+ return ctx;
+}
+
+static void esp_free_req_ctx(struct esp_skb_cb *esp_skb)
+{
+#if CONFIG_INET_ESP_NR_REQ_CACHE > 0
+ struct esp_data *esp = esp_skb->esp;
+
+ if (esp_skb->flags & 0x01) {
+ if (atomic_read(&esp->req_cache_cnt) < ESP_REQ_CACHE_MAX) {
+ esp->req_cache[esp->req_cache_tail] = esp_skb->req_ctx;
+ esp->req_cache_tail = (esp->req_cache_tail + 1) %
+ ESP_REQ_CACHE_MAX;
+ atomic_inc(&esp->req_cache_cnt);
+ return;
+ }
+ }
+#endif
+ kfree(esp_skb->req_ctx);
+}
+
+static inline void *esp_tmp_offload_info(void *tmp)
+{
+ return tmp;
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
return crypto_aead_ivsize(aead) ?
- PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+ PTR_ALIGN((u8 *) tmp + ESP_OFFLOAD_INFO_SIZE,
+ crypto_aead_alignmask(aead) + 1) :
+ ((u8 *) tmp + ESP_OFFLOAD_INFO_SIZE);
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
@@ -95,15 +177,14 @@ static inline struct scatterlist *esp_givreq_sg(
static void esp_output_done(struct crypto_async_request *base, int err)
{
- struct sk_buff *skb = base->data;
-
- kfree(ESP_SKB_CB(skb)->tmp);
+ struct sk_buff *skb = base->data;
+
+ esp_free_req_ctx(ESP_SKB_CB(skb));
xfrm_output_resume(skb, err);
}
-static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_output_sw(struct xfrm_state *x, struct sk_buff *skb)
{
- int err;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_givcrypt_request *req;
@@ -112,8 +193,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct esp_data *esp;
struct sk_buff *trailer;
void *tmp;
- u8 *iv;
- u8 *tail;
+ int err;
+ u8 *iv;
+ u8 *tail;
int blksize;
int clen;
int alen;
@@ -139,7 +221,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
nfrags = err;
- tmp = esp_alloc_tmp(aead, nfrags + 1);
+ ESP_SKB_CB(skb)->esp = esp;
+
+ tmp = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags + 1);
if (!tmp)
goto error;
@@ -157,6 +241,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
} while (0);
tail[clen - skb->len - 2] = (clen - skb->len) - 2;
tail[clen - skb->len - 1] = *skb_mac_header(skb);
+
pskb_put(skb, trailer, clen - skb->len + alen);
skb_push(skb, -skb_network_offset(skb));
@@ -197,7 +282,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
*skb_mac_header(skb) = IPPROTO_UDP;
}
-
esph->spi = x->id.spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
@@ -210,28 +294,265 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
- aead_givcrypt_set_giv(req, esph->enc_data,
+ aead_givcrypt_set_giv(req, esph->enc_data,
XFRM_SKB_CB(skb)->seq.output);
- ESP_SKB_CB(skb)->tmp = tmp;
+ ESP_SKB_CB(skb)->req_ctx = tmp;
+
err = crypto_aead_givencrypt(req);
if (err == -EINPROGRESS)
goto error;
if (err == -EBUSY)
err = NET_XMIT_DROP;
+
+	esp_free_req_ctx(ESP_SKB_CB(skb));
+
+error:
+ return err;
+}
+
+static int esp_output_done2_hw(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ void *esp_req = ESP_SKB_CB(skb)->req_ctx;
+ struct esp_offload_param *param;
+
+	if (err < 0)
+		goto out;
+	err = 0;
+ param = esp_tmp_offload_info(esp_req);
+ if (param->encap_sport) {
+ /* UDP Encapsulation - Fill in dynamic fields */
+ struct iphdr *iph = ip_hdr(skb);
+ int ihl = iph->ihl << 2;
+ struct udphdr *uh = (struct udphdr *) (((u8 *) iph) + ihl);
+ uh->source = param->encap_sport;
+ uh->dest = param->encap_dport;
+ *skb_mac_header(skb) = IPPROTO_UDP;
+ } else {
+ *skb_mac_header(skb) = IPPROTO_ESP;
+ }
+out:
+ esp_free_req_ctx(ESP_SKB_CB(skb));
+ return err;
+}
+
+static void esp_output_done_hw(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+
+ xfrm_output_resume(skb, esp_output_done2_hw(base, err));
+}
+
+static int esp_output_hw(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct crypto_aead *aead;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ struct scatterlist *dsg;
+ struct esp_data *esp;
+ struct sk_buff *trailer;
+ struct esp_offload_param *param;
+ void *esp_req;
+ int err;
+ u8 *iv;
+ int clen;
+ int alen;
+ int pad_len;
+ int nfrags;
+
+ /* For ESP transport mode, skb.data is at IP header. skb.len
+ includes IP header and payload. skb network header, transport
+	   header, and mac header pointers are updated by transport module
+ code.
+
+ Input:
+ --------------------------------------------
+ | Network Hdr| Transport Hdr| IP | Payload |
+ --------------------------------------------
+ ^
+ |
+ skb.data
+
+	   For ESP tunnel mode, the outer IP header is formed by the tunnel
+	   module. The skb network, transport, and mac header pointers are
+	   updated by the tunnel module code.
+
+ Input:
+ -----------------------------------------------------
+ | Outer IP | reserved1 | inner IP Header | Payload |
+ -----------------------------------------------------
+ ^
+ |
+ skb.data
+
+	   Outer IP - formed by tunnel mode code
+	   reserved1 - space reserved for UDP, ESP, SEQ, and IV
+ */
+
+ err = -ENOMEM;
+
+ esp = x->data;
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+
+ /* Compute pad length to expand skb tail for padding by HW */
+ if (x->props.mode == XFRM_MODE_TUNNEL) {
+ clen = skb->len;
+ } else {
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ clen = skb->len - (iph->ihl << 2);
+ }
+
+ pad_len = ALIGN(clen + 2, ALIGN(crypto_aead_blocksize(aead), 4));
+ if (esp->padlen)
+ pad_len = ALIGN(pad_len, esp->padlen);
+ pad_len -= clen;
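+	/* Example: clen = 60 with a 16-byte block size gives
+	 * ALIGN(62, 16) = 64, i.e. pad_len = 4 (including the pad-length
+	 * and next-header bytes appended by the hardware).
+	 */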
+
+	/* Expand trailer to include padding and ICV */
+ if ((err = skb_cow_data(skb, pad_len + alen, &trailer)) < 0)
+ goto error;
+ nfrags = err;
+
+ ESP_SKB_CB(skb)->esp = esp;
+
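+	/* nfrags * 2: room for separate source and destination SG lists,
+	 * since the offload engine reads from sg and writes to dsg.
+	 */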
+ esp_req = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags * 2);
+ if (!esp_req)
+ goto error;
+
+ param = esp_tmp_offload_info(esp_req);
+ iv = esp_tmp_iv(aead, param); /* Not used */
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = sg + nfrags;
+
+ /* Setup SG */
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
+
+ /* Prepare SKB to include padded length and ESP header */
+ clen = skb->len;
+ pskb_put(skb, trailer, pad_len + alen);
+ skb_push(skb, x->props.header_len);
+ sg_init_table(dsg, nfrags);
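+	/* Note: the 20-byte offset below assumes an outer IPv4 header
+	 * without options.
+	 */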
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_to_sgvec(skb, dsg, 20, skb->len - 20);
+ else
+ skb_to_sgvec(skb, dsg, 0, skb->len);
+	/* This is non-NULL only with UDP encapsulation, which is done by
+	   hardware. The UDP encapsulation port info is passed to the
+	   hardware offload driver at ESP init time. Dynamic per-packet
+	   info is saved here and filled in after the hardware offload
+	   completes. */
+ if (x->encap) {
+ struct xfrm_encap_tmpl *encap = x->encap;
+
+ spin_lock_bh(&x->lock);
+ param->encap_sport = encap->encap_sport;
+ param->encap_dport = encap->encap_dport;
+ spin_unlock_bh(&x->lock);
+ } else {
+ param->encap_sport = 0;
+ }
+
+	/* Format input parameters for the hardware ESP offload algorithm.
+	   For ESP transport mode, the IP header and payload are passed as
+	   the source sg. Associated data such as the SPI and sequence
+	   number are loaded at init time.
+
+ Input Src:
+ ----------------------------------------
+ | reserved1 | IP | Payload | reserved2 |
+ ----------------------------------------
+ ^
+ |
+	   start of source sg
+
+ ----------------------------------------
+ | reserved1 | IP | Payload | reserved2 |
+ ----------------------------------------
+ ^
+ |
+	   start of destination sg
+
+ reserved1 - space reserved for header (UDP ENCAP, ESP, & IV)
+ reserved2 - space reserved for padding + ICV
+
+ Output:
+ -------------------------------------------------------------
+ | IP | UDP ENCAP | ESP | IV | Payload | IPSec Padding | ICV |
+ -------------------------------------------------------------
+ UDP ENCAP if NAT
+
+	   For ESP tunnel mode, the IP header and payload are passed as
+	   the source sg. Associated data such as the SPI and sequence
+	   number are loaded at init time. The outer IP header is formed
+	   by the tunnel module.
+
+ Input:
+ ----------------------------------------------------
+ | Outer IP | reserved1 | inner IP Header | Payload |
+ ----------------------------------------------------
+ ^
+ |
+ start of source sg
+
+ ----------------------------------------------------
+ | Outer IP | reserved1 | inner IP Header | Payload |
+ ----------------------------------------------------
+ ^ (inner IP header not moved)
+ |
+	   start of destination sg
+
+ Outer IP - formed by tunnel mode code
+ reserved1 - space reserved for UDP, ESP, SEQ, and IV
+ inner IP Header - Start of sg. length is inner IP Header + payload
+
+ Output:
+ ------------------------------------------------------------------
+ |Outer IP|UDP ENCAP|SPI+SEQ|IV|Inner IP|Payload|IPSec Padding|ICV|
+ ------------------------------------------------------------------
+ Outer IP - Formed by tunnel mode code
+ UDP ENCAP if NAT
+
+ */
+ aead_request_set_callback(req, 0, esp_output_done_hw, skb);
+ aead_request_set_crypt(req, sg, dsg, clen, iv);
+
+ ESP_SKB_CB(skb)->req_ctx = esp_req;
+
+ err = crypto_aead_encrypt(req);
+ if (err == -EINPROGRESS)
+ goto error;
+
-	kfree(tmp);
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
+
+	err = esp_output_done2_hw(&req->base, err);
error:
return err;
}
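+
+/* Dispatch per SA: esp_init_aead()/esp_init_authenc() set
+ * XFRM_ALGO_FLAGS_OFFLOAD_ESP only if a combined-mode offload transform
+ * could be allocated, so this flag is fixed for the lifetime of the state.
+ */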
+static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)
+ return esp_output_hw(x, skb);
+ else
+ return esp_output_sw(x, skb);
+}
+
static int esp_input_done2(struct sk_buff *skb, int err)
{
struct iphdr *iph;
 	struct xfrm_state *x = xfrm_input_state(skb);
-	struct esp_data *esp = x->data;
+	struct esp_data *esp = ESP_SKB_CB(skb)->esp;
struct crypto_aead *aead = esp->aead;
int alen = crypto_aead_authsize(aead);
int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
@@ -239,8 +560,8 @@ static int esp_input_done2(struct sk_buff *skb, int err)
int ihl;
u8 nexthdr[2];
int padlen;
 
-	kfree(ESP_SKB_CB(skb)->tmp);
+	esp_free_req_ctx(ESP_SKB_CB(skb));
if (unlikely(err))
goto out;
@@ -250,6 +571,7 @@ static int esp_input_done2(struct sk_buff *skb, int err)
err = -EINVAL;
padlen = nexthdr[0];
+
if (padlen + 2 + alen >= elen)
goto out;
@@ -300,7 +622,6 @@ static int esp_input_done2(struct sk_buff *skb, int err)
skb_set_transport_header(skb, -ihl);
err = nexthdr[1];
-
/* RFC4303: Drop dummy packets without any error */
if (err == IPPROTO_NONE)
err = -EINVAL;
@@ -321,7 +642,7 @@ static void esp_input_done(struct crypto_async_request *base, int err)
* expensive, so we only support truncated data, which is the recommended
* and common case.
*/
-static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_input_sw(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_esp_hdr *esph;
struct esp_data *esp = x->data;
@@ -347,15 +668,18 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
nfrags = err;
err = -ENOMEM;
- tmp = esp_alloc_tmp(aead, nfrags + 1);
+
+ ESP_SKB_CB(skb)->esp = esp;
+
+ tmp = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags + 1);
if (!tmp)
goto out;
-	ESP_SKB_CB(skb)->tmp = tmp;
+	ESP_SKB_CB(skb)->req_ctx = tmp;
 	iv = esp_tmp_iv(aead, tmp);
req = esp_tmp_req(aead, iv);
asg = esp_req_sg(aead, req);
 	sg = asg + 1;
skb->ip_summed = CHECKSUM_NONE;
@@ -371,7 +695,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen, iv);
aead_request_set_assoc(req, asg, sizeof(*esph));
 
err = crypto_aead_decrypt(req);
if (err == -EINPROGRESS)
goto out;
@@ -382,6 +706,262 @@ out:
return err;
}
+static int esp_input_done2_hw(struct sk_buff *skb, int err)
+{
+ struct iphdr *iph;
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct esp_data *esp = ESP_SKB_CB(skb)->esp;
+ struct crypto_aead *aead = esp->aead;
+ int alen = crypto_aead_authsize(aead);
+ int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+ int elen = skb->len - hlen;
+ int ihl;
+ u8 nexthdr[2];
+ int padlen;
+ void *esp_req = ESP_SKB_CB(skb)->req_ctx;
+
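+	/* A positive err from the offload driver is assumed to encode the
+	 * pad-length and next-header trailer bytes as (padlen << 8) |
+	 * nexthdr; zero means we must read them from the packet instead.
+	 */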
+	if (err < 0)
+		goto out;
+
+	if (err == 0) {
+		if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
+			BUG();
+	} else {
+		nexthdr[0] = (unsigned char)(err >> 8);
+		nexthdr[1] = (unsigned char)err;
+	}
+
+ err = -EINVAL;
+ padlen = nexthdr[0] + 2;
+
+ if (padlen + alen > elen)
+ goto out;
+
+ iph = ip_hdr(skb);
+ ihl = iph->ihl * 4;
+
+ if (x->encap) {
+ struct xfrm_encap_tmpl *encap = x->encap;
+ struct esp_offload_param *param = esp_tmp_offload_info(esp_req);
+ /*
+ * 1) if the NAT-T peer's IP or port changed then
+		 *    advertise the change to the keying daemon.
+ * This is an inbound SA, so just compare
+ * SRC ports.
+ */
+ if (iph->saddr != x->props.saddr.a4 ||
+ param->encap_sport != encap->encap_sport) {
+ xfrm_address_t ipaddr;
+
+ ipaddr.a4 = iph->saddr;
+ km_new_mapping(x, &ipaddr, param->encap_sport);
+
+ /* XXX: perhaps add an extra
+ * policy check here, to see
+ * if we should allow or
+ * reject a packet from a
+ * different source
+ * address/port.
+ */
+ }
+
+ /*
+ * 2) ignore UDP/TCP checksums in case
+ * of NAT-T in Transport Mode, or
+ * perform other post-processing fixes
+ * as per draft-ietf-ipsec-udp-encaps-06,
+ * section 3.1.2
+ */
+ if (x->props.mode == XFRM_MODE_TRANSPORT)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ pskb_trim(skb, skb->len - alen - padlen);
+ __skb_pull(skb, hlen);
+ skb_set_transport_header(skb, -ihl);
+
+ err = nexthdr[1];
+
+ /* RFC4303: Drop dummy packets without any error */
+ if (err == IPPROTO_NONE)
+ err = -EINVAL;
+out:
+ esp_free_req_ctx(ESP_SKB_CB(skb));
+ return err;
+}
+
+static void esp_input_done_hw(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+
+ xfrm_input_resume(skb, esp_input_done2_hw(skb, err));
+}
+
+static int esp_input_hw(struct xfrm_state *x, struct sk_buff *skb)
+{
+ struct ip_esp_hdr *esph;
+ struct esp_data *esp = x->data;
+ struct crypto_aead *aead = esp->aead;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
+ int nfrags;
+ void *esp_req;
+ u8 *iv;
+ struct scatterlist *sg;
+ struct scatterlist *dsg;
+ struct esp_offload_param *param;
+ int err = -EINVAL;
+ int hdr_len;
+ int clen;
+ int src_len;
+
+	/* For ESP transport mode, skb.data is at the ESP header. skb.len
+	   includes the IP header and payload. The skb network, transport,
+	   and mac header pointers are updated by the transport module code.
+
+ Input:
+ -------------------------------------------------------------
+ | IP Hdr | UDP ENCAP | ESP | IV | Payload | IPSec Pad | ICV |
+ -------------------------------------------------------------
+ ^
+ |
+ skb.data and length start here
+ For ESP tunnel mode,
+
+ Input:
+ ----------------------------------------------------------
+ |Outer IP|UDP ENCAP|ESP|IV|inner IP|Payload|IPSec Pad|ICV|
+ ----------------------------------------------------------
+ ^
+ |
+ skb.data and length start here
+ */
+ if (!pskb_may_pull(skb, sizeof(*esph)))
+ goto out;
+
+ if (elen <= 0)
+ goto out;
+
+ if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ goto out;
+ nfrags = err;
+
+ err = -ENOMEM;
+
+ ESP_SKB_CB(skb)->esp = esp;
+
+ esp_req = esp_alloc_req_ctx(ESP_SKB_CB(skb), aead, nfrags * 2);
+ if (!esp_req)
+ goto out;
+
+ ESP_SKB_CB(skb)->req_ctx = esp_req;
+ param = esp_tmp_offload_info(esp_req);
+ iv = esp_tmp_iv(aead, param);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = sg + nfrags;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+	/* This is non-NULL only with UDP encapsulation. UDP decapsulation
+	   happens after ESP processing, so save the info we need here, as
+	   the hardware offload driver can overwrite it. */
+ if (x->encap) {
+ struct iphdr *iph = ip_hdr(skb);
+ struct udphdr *uh;
+ uh = (struct udphdr *) ((u8 *) iph + iph->ihl * 4);
+ param->encap_sport = uh->source;
+ }
+
+ /* Setup SG */
+ sg_init_table(sg, nfrags);
+ hdr_len = skb_network_offset(skb);
+ if (x->props.mode == XFRM_MODE_TUNNEL) {
+ clen = skb->len;
+ skb_to_sgvec(skb, sg, 0, clen);
+ } else {
+		clen = skb->len - hdr_len;
+ skb_to_sgvec(skb, sg, hdr_len, clen);
+ }
+ src_len = clen;
+
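+	/* Destination SG skips the space occupied by the ESP header and
+	 * IV (x->props.header_len); the decrypted payload is written back
+	 * in place.
+	 */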
+ sg_init_table(dsg, nfrags);
+ if (x->props.mode == XFRM_MODE_TUNNEL) {
+ clen -= hdr_len + x->props.header_len;
+ skb_to_sgvec(skb, dsg, hdr_len + x->props.header_len, clen);
+ } else {
+ clen -= crypto_aead_authsize(aead);
+ clen -= (x->props.header_len);
+ skb_to_sgvec(skb, dsg, hdr_len + x->props.header_len, clen);
+ }
+
+ /* For ESP transport mode:
+
+ Input:
+ --------------------------------------------------------------------
+ |IP|UDP ENCAP|ESP|IV| Payload | IPSec Pad | ICV |
+ --------------------------------------------------------------------
+ ^
+ |
+ start of source SG
+
+ -------------------------------------------------------------
+ | reserved | IP | Payload | Pad | Pad length | Next hdr |
+ -------------------------------------------------------------
+ ^ (Payload position not moved)
+ |
+ start of destination SG
+
+ Output:
+ -------------------------------------------------------------
+ | reserved | IP | Payload | Pad | Pad length | Next hdr |
+ -------------------------------------------------------------
+	   The IP header is moved to just before the payload; the payload
+	   itself is not moved.
+
+ For ESP tunnel mode:
+
+ Input:
+ --------------------------------------------------------------
+ |Out IP|UDP ENCAP|ESP | IV | In IP | Payload | IPSec Pad |ICV|
+ --------------------------------------------------------------
+ ESP - start of SG and length
+
+ Output:
+ ---------------------------------------------------------------------
+	   | reserved | In IP | Payload | Pad | Pad len | Next hdr |
+ ---------------------------------------------------------------------
+	   reserved - the removed headers; the inner IP header and payload
+	   are left in place
+
+ */
+ aead_request_set_callback(req, 0, esp_input_done_hw, skb);
+ aead_request_set_crypt(req, sg, dsg, src_len, iv);
+
+ err = crypto_aead_decrypt(req);
+ if (err == -EINPROGRESS)
+ goto out;
+
+ err = esp_input_done2_hw(skb, err);
+
+out:
+ return err;
+}
+
+static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)
+ return esp_input_hw(x, skb);
+ else
+ return esp_input_sw(x, skb);
+}
+
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
struct esp_data *esp = x->data;
@@ -414,16 +994,16 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
static void esp4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return;
-	x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
+	x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi,
+			      IPPROTO_ESP, AF_INET);
 	if (!x)
return;
NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
ntohl(esph->spi), ntohl(iph->daddr));
@@ -438,6 +1018,16 @@ static void esp_destroy(struct xfrm_state *x)
return;
crypto_free_aead(esp->aead);
+
+#if CONFIG_INET_ESP_NR_REQ_CACHE > 0
+ /* Delete request cache */
+	while (atomic_read(&esp->req_cache_cnt) > 0) {
+		kfree(esp->req_cache[esp->req_cache_head]);
+		esp->req_cache_head = (esp->req_cache_head + 1) %
+				      ESP_REQ_CACHE_MAX;
+		atomic_dec(&esp->req_cache_cnt);
+	}
+#endif
+
kfree(esp);
}
@@ -446,16 +1036,91 @@ static int esp_init_aead(struct xfrm_state *x)
struct esp_data *esp = x->data;
struct crypto_aead *aead;
int err;
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ char *key;
+ int key_len;
+ struct rtattr *rta;
+ struct esp_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ } *param;
- aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
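+	/* Try a combined-mode offload transform first, e.g.
+	 * "tunnel(esp(gcm(aes)))"; if no driver provides one, the flags
+	 * are cleared below and we fall back to the plain software AEAD.
+	 */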
+ switch (x->props.mode) {
+ case XFRM_MODE_TUNNEL:
+ snprintf(alg_name, ARRAY_SIZE(alg_name),
+ "tunnel(esp(%s))", x->aead->alg_name);
+ x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL
+ | XFRM_ALGO_FLAGS_OFFLOAD_ESP;
+ break;
+ case XFRM_MODE_TRANSPORT:
+ snprintf(alg_name, ARRAY_SIZE(alg_name),
+ "transport(esp(%s))", x->aead->alg_name);
+ x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT
+ | XFRM_ALGO_FLAGS_OFFLOAD_ESP;
+ break;
+ default:
+		strlcpy(alg_name, x->aead->alg_name, ARRAY_SIZE(alg_name));
+ break;
+ }
+ aead = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aead) && (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)) {
+ x->alg_flags &= ~(XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL
+ | XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT
+ | XFRM_ALGO_FLAGS_OFFLOAD_ESP);
+ aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
+ }
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
esp->aead = aead;
- err = crypto_aead_setkey(aead, x->aead->alg_key,
- (x->aead->alg_key_len + 7) / 8);
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) {
+	/* For ESP offload, the ESP offload parameters must be
+	   passed down through the setkey function. */
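+	/* Key blob layout handed to setkey:
+	 *   [rtattr CRYPTO_AUTHENC_KEYA_PARAM][esp_param][raw key]
+	 */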
+ key_len = RTA_SPACE(sizeof(*param)) +
+ ((x->aead->alg_key_len + 7) / 8);
+		key = kmalloc(key_len, GFP_KERNEL);
+		if (!key) {
+			err = -ENOMEM;
+			goto error;
+		}
+ rta = (void *) key;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->spi = cpu_to_be32(x->id.spi);
+ param->seq = cpu_to_be32(x->replay.oseq);
+ if (x->encap) {
+ int encap_type;
+
+ spin_lock_bh(&x->lock);
+ encap_type = x->encap->encap_type;
+ spin_unlock_bh(&x->lock);
+
+ switch (encap_type) {
+ default:
+ case UDP_ENCAP_ESPINUDP:
+ param->encap_uhl = cpu_to_be16(sizeof(struct udphdr));
+ break;
+ case UDP_ENCAP_ESPINUDP_NON_IKE:
+ param->encap_uhl = cpu_to_be16(sizeof(struct udphdr) +
+ sizeof(__be32)*2);
+ break;
+ }
+ } else {
+ param->encap_uhl = 0;
+ }
+ param->pad_block_size = cpu_to_be16(esp->padlen);
+ memcpy(key + RTA_SPACE(sizeof(*param)),
+ x->aead->alg_key,
+ (x->aead->alg_key_len + 7) / 8);
+ } else {
+ key_len = (x->aead->alg_key_len + 7) / 8;
+ key = x->aead->alg_key;
+ }
+
+ err = crypto_aead_setkey(aead, key, key_len);
+ if (key != x->aead->alg_key)
+ kfree(key);
if (err)
goto error;
@@ -471,25 +1136,68 @@ static int esp_init_authenc(struct xfrm_state *x)
{
struct esp_data *esp = x->data;
struct crypto_aead *aead;
- struct crypto_authenc_key_param *param;
struct rtattr *rta;
char *key;
char *p;
char authenc_name[CRYPTO_MAX_ALG_NAME];
unsigned int keylen;
int err;
+ struct esp_authenc_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ struct crypto_authenc_key_param authenc_param;
+ /* Must be last */
+ } *esp_param = NULL;
+ struct crypto_authenc_key_param *param = NULL;
err = -EINVAL;
if (x->ealg == NULL)
goto error;
err = -ENAMETOOLONG;
- if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
- x->aalg ? x->aalg->alg_name : "digest_null",
- x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
- goto error;
+
+ switch (x->props.mode) {
+ case XFRM_MODE_TUNNEL:
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "tunnel(esp(authenc(%s,%s)))",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL
+ | XFRM_ALGO_FLAGS_OFFLOAD_ESP;
+ break;
+ case XFRM_MODE_TRANSPORT:
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "transport(esp(authenc(%s,%s)))",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ x->alg_flags |= XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT
+ | XFRM_ALGO_FLAGS_OFFLOAD_ESP;
+ break;
+ default:
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "authenc(%s,%s)",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ break;
+ }
aead = crypto_alloc_aead(authenc_name, 0, 0);
+ if (IS_ERR(aead) && (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)) {
+ x->alg_flags &= ~(XFRM_ALGO_FLAGS_OFFLOAD_TUNNEL
+ | XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT
+ | XFRM_ALGO_FLAGS_OFFLOAD_ESP);
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "authenc(%s,%s)",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ aead = crypto_alloc_aead(authenc_name, 0, 0);
+ }
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -497,7 +1205,11 @@ static int esp_init_authenc(struct xfrm_state *x)
esp->aead = aead;
keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
- (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+ (x->ealg->alg_key_len + 7) / 8;
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP)
+ keylen += RTA_SPACE(sizeof(*esp_param));
+ else
+ keylen += RTA_SPACE(sizeof(*param));
err = -ENOMEM;
key = kmalloc(keylen, GFP_KERNEL);
if (!key)
@@ -506,9 +1218,15 @@ static int esp_init_authenc(struct xfrm_state *x)
p = key;
rta = (void *)p;
rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
- rta->rta_len = RTA_LENGTH(sizeof(*param));
- param = RTA_DATA(rta);
- p += RTA_SPACE(sizeof(*param));
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) {
+ rta->rta_len = RTA_LENGTH(sizeof(*esp_param));
+ esp_param = RTA_DATA(rta);
+ p += RTA_SPACE(sizeof(*esp_param));
+ } else {
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ p += RTA_SPACE(sizeof(*param));
+ }
if (x->aalg) {
struct xfrm_algo_desc *aalg_desc;
@@ -535,7 +1253,39 @@ static int esp_init_authenc(struct xfrm_state *x)
goto free_key;
}
- param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_ESP) {
+ esp_param->authenc_param.enckeylen =
+ cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+		/* For ESP offload, the ESP offload parameters must be
+		   passed down through the setkey function. */
+ esp_param->spi = cpu_to_be32(x->id.spi);
+ esp_param->seq = cpu_to_be32(x->replay.oseq);
+ if (x->encap) {
+ int encap_type;
+
+ spin_lock_bh(&x->lock);
+ encap_type = x->encap->encap_type;
+ spin_unlock_bh(&x->lock);
+
+ switch (encap_type) {
+ default:
+ case UDP_ENCAP_ESPINUDP:
+ esp_param->encap_uhl =
+ cpu_to_be16(sizeof(struct udphdr));
+ break;
+ case UDP_ENCAP_ESPINUDP_NON_IKE:
+ esp_param->encap_uhl =
+ cpu_to_be16(sizeof(struct udphdr) +
+ sizeof(__be32)*2);
+ break;
+ }
+ } else {
+ esp_param->encap_uhl = 0;
+ }
+ esp_param->pad_block_size = cpu_to_be16(esp->padlen);
+ } else {
+ param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+ }
memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
err = crypto_aead_setkey(aead, key, keylen);
@@ -569,7 +1319,12 @@ static int esp_init_state(struct xfrm_state *x)
goto error;
aead = esp->aead;
-
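+	/* Per-SA request-context cache bookkeeping; the base context size
+	 * (zero SG entries) is recorded so cached buffers can be checked
+	 * for reuse (assumption based on esp_req_ctx_size(aead, 0)).
+	 */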
+#if CONFIG_INET_ESP_NR_REQ_CACHE > 0
+ atomic_set(&esp->req_cache_cnt, 0);
+ esp->req_cache_head = 0;
+ esp->req_cache_tail = 0;
+#endif
+ esp->req_cache_size = esp_req_ctx_size(aead, 0);
esp->padlen = 0;
x->props.header_len = sizeof(struct ip_esp_hdr) +
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f1813bc7108..8b8b2766253 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -610,7 +610,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
/*
* We can't seek on a socket input
*/
 	if (unlikely(*ppos))
return -ESPIPE;
ret = spliced = 0;
@@ -623,8 +623,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
if (ret < 0)
break;
else if (!ret) {
- if (spliced)
+ if (spliced >= len)
break;
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
@@ -1334,6 +1338,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_eat_skb(sk, skb, 0);
if (!desc->count)
break;
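+		/* Presumably published early so a blocking recv_actor sees
+		 * up-to-date progress; the assignment after the loop is
+		 * kept for the normal exit path.
+		 */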
+ tp->copied_seq = seq;
}
tp->copied_seq = seq;
diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
index fd840c7d75e..bf1498c138e 100644
--- a/net/ipv4/xfrm4_mode_transport.c
+++ b/net/ipv4/xfrm4_mode_transport.c
@@ -23,6 +23,15 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph = ip_hdr(skb);
int ihl = iph->ihl * 4;
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT) {
+ /* Hardware offload will take care of moving the IP header */
+ skb_set_network_header(skb, -x->props.header_len);
+ skb->mac_header = skb->network_header +
+ offsetof(struct iphdr, protocol);
+ skb->transport_header = skb->network_header + ihl;
+ return 0;
+ }
+
skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
@@ -42,8 +51,16 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
*/
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
- int ihl = skb->data - skb_transport_header(skb);
+ int ihl;
+
+ if (x->alg_flags & XFRM_ALGO_FLAGS_OFFLOAD_TRANPORT) {
+		/* Hardware offload will take care of moving the IP header */
+ skb->network_header = skb->transport_header;
+ skb_reset_transport_header(skb);
+ return 0;
+ }
+ ihl = skb->data - skb_transport_header(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
skb_network_header(skb), ihl);