Diffstat (limited to 'net/core/pktgen.c')
-rw-r--r--	net/core/pktgen.c	578
1 file changed, 363 insertions(+), 215 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b29dacf900f..fc17a9d309a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -160,10 +160,13 @@
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#ifdef CONFIG_XFRM
#include <net/xfrm.h>
#endif
+#include <net/netns/generic.h>
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
@@ -197,6 +200,7 @@
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
#define F_NODE (1<<15) /* Node memory alloc*/
+#define F_UDPCSUM (1<<16) /* Include UDP checksum */
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -212,7 +216,6 @@
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL "pgctrl"
-static struct proc_dir_entry *pg_proc_dir;
#define MAX_CFLOWS 65536
@@ -386,6 +389,9 @@ struct pktgen_dev {
#ifdef CONFIG_XFRM
__u8 ipsmode; /* IPSEC mode (config) */
__u8 ipsproto; /* IPSEC type (config) */
+ __u32 spi;
+ struct dst_entry dst;
+ struct dst_ops dstops;
#endif
char result[512];
};
@@ -397,7 +403,15 @@ struct pktgen_hdr {
__be32 tv_usec;
};
-static bool pktgen_exiting __read_mostly;
+
+static int pg_net_id __read_mostly;
+
+struct pktgen_net {
+ struct net *net;
+ struct proc_dir_entry *proc_dir;
+ struct list_head pktgen_threads;
+ bool pktgen_exiting;
+};
struct pktgen_thread {
spinlock_t if_lock; /* for list of devices */
@@ -414,6 +428,7 @@ struct pktgen_thread {
wait_queue_head_t queue;
struct completion start_done;
+ struct pktgen_net *net;
};
#define REMOVE 1
@@ -428,9 +443,9 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
const char *ifname, bool exact);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
-static void pktgen_run_all_threads(void);
-static void pktgen_reset_all_threads(void);
-static void pktgen_stop_all_threads_ifs(void);
+static void pktgen_run_all_threads(struct pktgen_net *pn);
+static void pktgen_reset_all_threads(struct pktgen_net *pn);
+static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn);
static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
@@ -442,7 +457,6 @@ static int pg_clone_skb_d __read_mostly;
static int debug __read_mostly;
static DEFINE_MUTEX(pktgen_thread_lock);
-static LIST_HEAD(pktgen_threads);
static struct notifier_block pktgen_notifier_block = {
.notifier_call = pktgen_device_event,
@@ -462,44 +476,41 @@ static int pgctrl_show(struct seq_file *seq, void *v)
static ssize_t pgctrl_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int err = 0;
char data[128];
+ struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);
- if (!capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- goto out;
- }
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (count == 0)
+ return -EINVAL;
if (count > sizeof(data))
count = sizeof(data);
- if (copy_from_user(data, buf, count)) {
- err = -EFAULT;
- goto out;
- }
- data[count - 1] = 0; /* Make string */
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ data[count - 1] = 0; /* Strip trailing '\n' and terminate string */
if (!strcmp(data, "stop"))
- pktgen_stop_all_threads_ifs();
+ pktgen_stop_all_threads_ifs(pn);
else if (!strcmp(data, "start"))
- pktgen_run_all_threads();
+ pktgen_run_all_threads(pn);
else if (!strcmp(data, "reset"))
- pktgen_reset_all_threads();
+ pktgen_reset_all_threads(pn);
else
pr_warning("Unknown command: %s\n", data);
- err = count;
-
-out:
- return err;
+ return count;
}
static int pgctrl_open(struct inode *inode, struct file *file)
{
- return single_open(file, pgctrl_show, PDE(inode)->data);
+ return single_open(file, pgctrl_show, PDE_DATA(inode));
}
static const struct file_operations pktgen_fops = {
@@ -562,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
is_zero_ether_addr(pkt_dev->src_mac) ?
pkt_dev->odev->dev_addr : pkt_dev->src_mac);
- seq_printf(seq, "dst_mac: ");
+ seq_puts(seq, "dst_mac: ");
seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
seq_printf(seq,
@@ -577,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->nr_labels) {
unsigned int i;
- seq_printf(seq, " mpls: ");
+ seq_puts(seq, " mpls: ");
for (i = 0; i < pkt_dev->nr_labels; i++)
seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -602,61 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->node >= 0)
seq_printf(seq, " node: %d\n", pkt_dev->node);
- seq_printf(seq, " Flags: ");
+ seq_puts(seq, " Flags: ");
if (pkt_dev->flags & F_IPV6)
- seq_printf(seq, "IPV6 ");
+ seq_puts(seq, "IPV6 ");
if (pkt_dev->flags & F_IPSRC_RND)
- seq_printf(seq, "IPSRC_RND ");
+ seq_puts(seq, "IPSRC_RND ");
if (pkt_dev->flags & F_IPDST_RND)
- seq_printf(seq, "IPDST_RND ");
+ seq_puts(seq, "IPDST_RND ");
if (pkt_dev->flags & F_TXSIZE_RND)
- seq_printf(seq, "TXSIZE_RND ");
+ seq_puts(seq, "TXSIZE_RND ");
if (pkt_dev->flags & F_UDPSRC_RND)
- seq_printf(seq, "UDPSRC_RND ");
+ seq_puts(seq, "UDPSRC_RND ");
if (pkt_dev->flags & F_UDPDST_RND)
- seq_printf(seq, "UDPDST_RND ");
+ seq_puts(seq, "UDPDST_RND ");
+
+ if (pkt_dev->flags & F_UDPCSUM)
+ seq_puts(seq, "UDPCSUM ");
if (pkt_dev->flags & F_MPLS_RND)
- seq_printf(seq, "MPLS_RND ");
+ seq_puts(seq, "MPLS_RND ");
if (pkt_dev->flags & F_QUEUE_MAP_RND)
- seq_printf(seq, "QUEUE_MAP_RND ");
+ seq_puts(seq, "QUEUE_MAP_RND ");
if (pkt_dev->flags & F_QUEUE_MAP_CPU)
- seq_printf(seq, "QUEUE_MAP_CPU ");
+ seq_puts(seq, "QUEUE_MAP_CPU ");
if (pkt_dev->cflows) {
if (pkt_dev->flags & F_FLOW_SEQ)
- seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
+ seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/
else
- seq_printf(seq, "FLOW_RND ");
+ seq_puts(seq, "FLOW_RND ");
}
#ifdef CONFIG_XFRM
- if (pkt_dev->flags & F_IPSEC_ON)
- seq_printf(seq, "IPSEC ");
+ if (pkt_dev->flags & F_IPSEC_ON) {
+ seq_puts(seq, "IPSEC ");
+ if (pkt_dev->spi)
+ seq_printf(seq, "spi:%u", pkt_dev->spi);
+ }
#endif
if (pkt_dev->flags & F_MACSRC_RND)
- seq_printf(seq, "MACSRC_RND ");
+ seq_puts(seq, "MACSRC_RND ");
if (pkt_dev->flags & F_MACDST_RND)
- seq_printf(seq, "MACDST_RND ");
+ seq_puts(seq, "MACDST_RND ");
if (pkt_dev->flags & F_VID_RND)
- seq_printf(seq, "VID_RND ");
+ seq_puts(seq, "VID_RND ");
if (pkt_dev->flags & F_SVID_RND)
- seq_printf(seq, "SVID_RND ");
+ seq_puts(seq, "SVID_RND ");
if (pkt_dev->flags & F_NODE)
- seq_printf(seq, "NODE_ALLOC ");
+ seq_puts(seq, "NODE_ALLOC ");
seq_puts(seq, "\n");
@@ -699,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->result[0])
seq_printf(seq, "Result: %s\n", pkt_dev->result);
else
- seq_printf(seq, "Result: Idle\n");
+ seq_puts(seq, "Result: Idle\n");
return 0;
}
@@ -1219,12 +1236,24 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!NODE_ALLOC") == 0)
pkt_dev->flags &= ~F_NODE;
+ else if (strcmp(f, "UDPCSUM") == 0)
+ pkt_dev->flags |= F_UDPCSUM;
+
+ else if (strcmp(f, "!UDPCSUM") == 0)
+ pkt_dev->flags &= ~F_UDPCSUM;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
f,
"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
- "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
+ "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
+ "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
+ "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
+#ifdef CONFIG_XFRM
+ "IPSEC, "
+#endif
+ "NODE_ALLOC\n");
return count;
}
sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -1413,7 +1442,7 @@ static ssize_t pktgen_if_write(struct file *file,
if (!mac_pton(valstr, pkt_dev->dst_mac))
return -EINVAL;
/* Set up Dest MAC */
- memcpy(&pkt_dev->hh[0], pkt_dev->dst_mac, ETH_ALEN);
+ ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac);
sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
return count;
@@ -1430,7 +1459,7 @@ static ssize_t pktgen_if_write(struct file *file,
if (!mac_pton(valstr, pkt_dev->src_mac))
return -EINVAL;
/* Set up Src MAC */
- memcpy(&pkt_dev->hh[6], pkt_dev->src_mac, ETH_ALEN);
+ ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac);
sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
return count;
@@ -1455,7 +1484,18 @@ static ssize_t pktgen_if_write(struct file *file,
sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
return count;
}
+#ifdef CONFIG_XFRM
+ if (!strcmp(name, "spi")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+ i += len;
+ pkt_dev->spi = value;
+ sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
+ return count;
+ }
+#endif
if (!strcmp(name, "flowlen")) {
len = num_arg(&user_buffer[i], 10, &value);
if (len < 0)
@@ -1676,7 +1716,7 @@ static ssize_t pktgen_if_write(struct file *file,
static int pktgen_if_open(struct inode *inode, struct file *file)
{
- return single_open(file, pktgen_if_show, PDE(inode)->data);
+ return single_open(file, pktgen_if_show, PDE_DATA(inode));
}
static const struct file_operations pktgen_if_fops = {
@@ -1695,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
BUG_ON(!t);
- seq_printf(seq, "Running: ");
+ seq_puts(seq, "Running: ");
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list)
if (pkt_dev->running)
seq_printf(seq, "%s ", pkt_dev->odevname);
- seq_printf(seq, "\nStopped: ");
+ seq_puts(seq, "\nStopped: ");
list_for_each_entry(pkt_dev, &t->if_list, list)
if (!pkt_dev->running)
@@ -1711,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
if (t->result[0])
seq_printf(seq, "\nResult: %s\n", t->result);
else
- seq_printf(seq, "\nResult: NA\n");
+ seq_puts(seq, "\nResult: NA\n");
if_unlock(t);
@@ -1781,10 +1821,13 @@ static ssize_t pktgen_thread_write(struct file *file,
return -EFAULT;
i += len;
mutex_lock(&pktgen_thread_lock);
- pktgen_add_device(t, f);
+ ret = pktgen_add_device(t, f);
mutex_unlock(&pktgen_thread_lock);
- ret = count;
- sprintf(pg_result, "OK: add_device=%s", f);
+ if (!ret) {
+ ret = count;
+ sprintf(pg_result, "OK: add_device=%s", f);
+ } else
+ sprintf(pg_result, "ERROR: can not add device %s", f);
goto out;
}
@@ -1811,7 +1854,7 @@ out:
static int pktgen_thread_open(struct inode *inode, struct file *file)
{
- return single_open(file, pktgen_thread_show, PDE(inode)->data);
+ return single_open(file, pktgen_thread_show, PDE_DATA(inode));
}
static const struct file_operations pktgen_thread_fops = {
@@ -1824,13 +1867,14 @@ static const struct file_operations pktgen_thread_fops = {
};
/* Think find or remove for NN */
-static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
+static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
+ const char *ifname, int remove)
{
struct pktgen_thread *t;
struct pktgen_dev *pkt_dev = NULL;
bool exact = (remove == FIND);
- list_for_each_entry(t, &pktgen_threads, th_list) {
+ list_for_each_entry(t, &pn->pktgen_threads, th_list) {
pkt_dev = pktgen_find_dev(t, ifname, exact);
if (pkt_dev) {
if (remove) {
@@ -1848,7 +1892,7 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
/*
* mark a device for removal
*/
-static void pktgen_mark_device(const char *ifname)
+static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
{
struct pktgen_dev *pkt_dev = NULL;
const int max_tries = 10, msec_per_try = 125;
@@ -1859,7 +1903,7 @@ static void pktgen_mark_device(const char *ifname)
while (1) {
- pkt_dev = __pktgen_NN_threads(ifname, REMOVE);
+ pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
if (pkt_dev == NULL)
break; /* success */
@@ -1880,21 +1924,21 @@ static void pktgen_mark_device(const char *ifname)
mutex_unlock(&pktgen_thread_lock);
}
-static void pktgen_change_name(struct net_device *dev)
+static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
{
struct pktgen_thread *t;
- list_for_each_entry(t, &pktgen_threads, th_list) {
+ list_for_each_entry(t, &pn->pktgen_threads, th_list) {
struct pktgen_dev *pkt_dev;
list_for_each_entry(pkt_dev, &t->if_list, list) {
if (pkt_dev->odev != dev)
continue;
- remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
+ proc_remove(pkt_dev->entry);
pkt_dev->entry = proc_create_data(dev->name, 0600,
- pg_proc_dir,
+ pn->proc_dir,
&pktgen_if_fops,
pkt_dev);
if (!pkt_dev->entry)
@@ -1908,9 +1952,10 @@ static void pktgen_change_name(struct net_device *dev)
static int pktgen_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);
- if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
+ if (pn->pktgen_exiting)
return NOTIFY_DONE;
/* It is OK that we do not hold the group lock right now,
@@ -1919,18 +1964,19 @@ static int pktgen_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- pktgen_change_name(dev);
+ pktgen_change_name(pn, dev);
break;
case NETDEV_UNREGISTER:
- pktgen_mark_device(dev->name);
+ pktgen_mark_device(pn, dev->name);
break;
}
return NOTIFY_DONE;
}
-static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
+static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
+ struct pktgen_dev *pkt_dev,
const char *ifname)
{
char b[IFNAMSIZ+5];
@@ -1944,13 +1990,14 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
}
b[i] = 0;
- return dev_get_by_name(&init_net, b);
+ return dev_get_by_name(pn->net, b);
}
/* Associate pktgen_dev with a device. */
-static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
+static int pktgen_setup_dev(const struct pktgen_net *pn,
+ struct pktgen_dev *pkt_dev, const char *ifname)
{
struct net_device *odev;
int err;
@@ -1961,7 +2008,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
pkt_dev->odev = NULL;
}
- odev = pktgen_dev_get_by_name(pkt_dev, ifname);
+ odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
if (!odev) {
pr_err("no such netdevice: \"%s\"\n", ifname);
return -ENODEV;
@@ -2015,10 +2062,10 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
- memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
+ ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);
/* Set up Dest MAC */
- memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
+ ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);
if (pkt_dev->flags & F_IPV6) {
int i, set = 0, err = 1;
@@ -2182,7 +2229,7 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
pkt_dev->curfl = 0; /*reset */
}
} else {
- flow = random32() % pkt_dev->cflows;
+ flow = prandom_u32() % pkt_dev->cflows;
pkt_dev->curfl = flow;
if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
@@ -2203,14 +2250,23 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
struct xfrm_state *x = pkt_dev->flows[flow].x;
+ struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
if (!x) {
- /*slow path: we dont already have xfrm_state*/
- x = xfrm_stateonly_find(&init_net, DUMMY_MARK,
- (xfrm_address_t *)&pkt_dev->cur_daddr,
- (xfrm_address_t *)&pkt_dev->cur_saddr,
- AF_INET,
- pkt_dev->ipsmode,
- pkt_dev->ipsproto, 0);
+
+ if (pkt_dev->spi) {
+ /* We need to find the right SA as quickly as possible,
+ * so search with minimum criteria to achieve this.
+ */
+ x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
+ } else {
+ /* slow path: we don't already have an xfrm_state */
+ x = xfrm_stateonly_find(pn->net, DUMMY_MARK,
+ (xfrm_address_t *)&pkt_dev->cur_daddr,
+ (xfrm_address_t *)&pkt_dev->cur_saddr,
+ AF_INET,
+ pkt_dev->ipsmode,
+ pkt_dev->ipsproto, 0);
+ }
if (x) {
pkt_dev->flows[flow].x = x;
set_pkt_overhead(pkt_dev);
@@ -2229,7 +2285,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
- t = random32() %
+ t = prandom_u32() %
(pkt_dev->queue_map_max -
pkt_dev->queue_map_min + 1)
+ pkt_dev->queue_map_min;
@@ -2261,7 +2317,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACSRC_RND)
- mc = random32() % pkt_dev->src_mac_count;
+ mc = prandom_u32() % pkt_dev->src_mac_count;
else {
mc = pkt_dev->cur_src_mac_offset++;
if (pkt_dev->cur_src_mac_offset >=
@@ -2287,7 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACDST_RND)
- mc = random32() % pkt_dev->dst_mac_count;
+ mc = prandom_u32() % pkt_dev->dst_mac_count;
else {
mc = pkt_dev->cur_dst_mac_offset++;
@@ -2314,21 +2370,21 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
for (i = 0; i < pkt_dev->nr_labels; i++)
if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
- ((__force __be32)random32() &
+ ((__force __be32)prandom_u32() &
htonl(0x000fffff));
}
if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
- pkt_dev->vlan_id = random32() & (4096-1);
+ pkt_dev->vlan_id = prandom_u32() & (4096 - 1);
}
if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
- pkt_dev->svlan_id = random32() & (4096 - 1);
+ pkt_dev->svlan_id = prandom_u32() & (4096 - 1);
}
if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
if (pkt_dev->flags & F_UDPSRC_RND)
- pkt_dev->cur_udp_src = random32() %
+ pkt_dev->cur_udp_src = prandom_u32() %
(pkt_dev->udp_src_max - pkt_dev->udp_src_min)
+ pkt_dev->udp_src_min;
@@ -2341,7 +2397,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
if (pkt_dev->flags & F_UDPDST_RND) {
- pkt_dev->cur_udp_dst = random32() %
+ pkt_dev->cur_udp_dst = prandom_u32() %
(pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
+ pkt_dev->udp_dst_min;
} else {
@@ -2358,7 +2414,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (imn < imx) {
__u32 t;
if (pkt_dev->flags & F_IPSRC_RND)
- t = random32() % (imx - imn) + imn;
+ t = prandom_u32() % (imx - imn) + imn;
else {
t = ntohl(pkt_dev->cur_saddr);
t++;
@@ -2379,17 +2435,15 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__be32 s;
if (pkt_dev->flags & F_IPDST_RND) {
- t = random32() % (imx - imn) + imn;
- s = htonl(t);
-
- while (ipv4_is_loopback(s) ||
- ipv4_is_multicast(s) ||
- ipv4_is_lbcast(s) ||
- ipv4_is_zeronet(s) ||
- ipv4_is_local_multicast(s)) {
- t = random32() % (imx - imn) + imn;
+ do {
+ t = prandom_u32() %
+ (imx - imn) + imn;
s = htonl(t);
- }
+ } while (ipv4_is_loopback(s) ||
+ ipv4_is_multicast(s) ||
+ ipv4_is_lbcast(s) ||
+ ipv4_is_zeronet(s) ||
+ ipv4_is_local_multicast(s));
pkt_dev->cur_daddr = s;
} else {
t = ntohl(pkt_dev->cur_daddr);
@@ -2420,7 +2474,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
for (i = 0; i < 4; i++) {
pkt_dev->cur_in6_daddr.s6_addr32[i] =
- (((__force __be32)random32() |
+ (((__force __be32)prandom_u32() |
pkt_dev->min_in6_daddr.s6_addr32[i]) &
pkt_dev->max_in6_daddr.s6_addr32[i]);
}
@@ -2430,7 +2484,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
__u32 t;
if (pkt_dev->flags & F_TXSIZE_RND) {
- t = random32() %
+ t = prandom_u32() %
(pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
+ pkt_dev->min_pkt_size;
} else {
@@ -2448,31 +2502,47 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
#ifdef CONFIG_XFRM
+static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {
+
+ [RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
+};
+
static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
{
struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
int err = 0;
+ struct net *net = dev_net(pkt_dev->odev);
if (!x)
return 0;
/* XXX: we dont support tunnel mode for now until
* we resolve the dst issue */
- if (x->props.mode != XFRM_MODE_TRANSPORT)
+ if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
return 0;
- spin_lock(&x->lock);
+ /* But when the user specifies a valid SPI, the transformation
+ * supports both transport/tunnel mode and ESP/AH type.
+ */
+ if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
+ skb->_skb_refdst = (unsigned long)&pkt_dev->dst | SKB_DST_NOREF;
+ rcu_read_lock_bh();
err = x->outer_mode->output(x, skb);
- if (err)
+ rcu_read_unlock_bh();
+ if (err) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
goto error;
+ }
err = x->type->output(x, skb);
- if (err)
+ if (err) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
goto error;
-
+ }
+ spin_lock_bh(&x->lock);
x->curlft.bytes += skb->len;
x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
error:
- spin_unlock(&x->lock);
return err;
}
@@ -2500,6 +2570,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
if (x) {
int ret;
__u8 *eth;
+ struct iphdr *iph;
+
nhead = x->props.header_len - skb_headroom(skb);
if (nhead > 0) {
ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2521,6 +2593,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
eth = (__u8 *) skb_push(skb, ETH_HLEN);
memcpy(eth, pkt_dev->hh, 12);
*(u16 *) &eth[12] = protocol;
+
+ /* Update IPv4 header len as well as checksum value */
+ iph = ip_hdr(skb);
+ iph->tot_len = htons(skb->len - ETH_HLEN);
+ ip_send_check(iph);
}
}
return 1;
@@ -2612,6 +2689,29 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
pgh->tv_usec = htonl(timestamp.tv_usec);
}
+static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
+ struct pktgen_dev *pkt_dev,
+ unsigned int extralen)
+{
+ struct sk_buff *skb = NULL;
+ unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
+ pkt_dev->pkt_overhead;
+
+ if (pkt_dev->flags & F_NODE) {
+ int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
+
+ skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
+ if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
+ skb->dev = dev;
+ }
+ } else {
+ skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
+ }
+
+ return skb;
+}
+
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct pktgen_dev *pkt_dev)
{
@@ -2642,32 +2742,13 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
datalen = (odev->hard_header_len + 16) & ~0xf;
- if (pkt_dev->flags & F_NODE) {
- int node;
-
- if (pkt_dev->node >= 0)
- node = pkt_dev->node;
- else
- node = numa_node_id();
-
- skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
- + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD);
- skb->dev = odev;
- }
- }
- else
- skb = __netdev_alloc_skb(odev,
- pkt_dev->cur_pkt_size + 64
- + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
-
+ skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
}
- prefetchw(skb->data);
+ prefetchw(skb->data);
skb_reserve(skb, datalen);
/* Reserve for ethernet and IP header */
@@ -2693,15 +2774,15 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
*vlan_encapsulated_proto = htons(ETH_P_IP);
}
- skb->network_header = skb->tail;
- skb->transport_header = skb->network_header + sizeof(struct iphdr);
- skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, skb->len);
+ iph = (struct iphdr *) skb_put(skb, sizeof(struct iphdr));
+
+ skb_set_transport_header(skb, skb->len);
+ udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
skb_set_queue_mapping(skb, queue_map);
skb->priority = pkt_dev->skb_priority;
- iph = ip_hdr(skb);
- udph = udp_hdr(skb);
-
memcpy(eth, pkt_dev->hh, 12);
*(__be16 *) & eth[12] = protocol;
@@ -2714,7 +2795,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
udph->len = htons(datalen + 8); /* DATA + udphdr */
- udph->check = 0; /* No checksum */
+ udph->check = 0;
iph->ihl = 5;
iph->version = 4;
@@ -2728,13 +2809,28 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
- iph->check = 0;
- iph->check = ip_fast_csum((void *)iph, iph->ihl);
+ ip_send_check(iph);
skb->protocol = protocol;
- skb->mac_header = (skb->network_header - ETH_HLEN -
- pkt_dev->pkt_overhead);
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V4_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+ udp4_hwcsum(skb, udph->source, udph->dest);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+ datalen + 8, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
@@ -2751,7 +2847,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct sk_buff *skb = NULL;
__u8 *eth;
struct udphdr *udph;
- int datalen;
+ int datalen, udplen;
struct ipv6hdr *iph;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
@@ -2773,15 +2869,13 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
mod_cur_headers(pkt_dev);
queue_map = pkt_dev->cur_queue_map;
- skb = __netdev_alloc_skb(odev,
- pkt_dev->cur_pkt_size + 64
- + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
+ skb = pktgen_alloc_skb(odev, pkt_dev, 16);
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
}
- prefetchw(skb->data);
+ prefetchw(skb->data);
skb_reserve(skb, 16);
/* Reserve for ethernet and IP header */
@@ -2807,13 +2901,14 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
*vlan_encapsulated_proto = htons(ETH_P_IPV6);
}
- skb->network_header = skb->tail;
- skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
- skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, skb->len);
+ iph = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
+
+ skb_set_transport_header(skb, skb->len);
+ udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
skb_set_queue_mapping(skb, queue_map);
skb->priority = pkt_dev->skb_priority;
- iph = ipv6_hdr(skb);
- udph = udp_hdr(skb);
memcpy(eth, pkt_dev->hh, 12);
*(__be16 *) &eth[12] = protocol;
@@ -2828,10 +2923,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
net_info_ratelimited("increased datalen to %d\n", datalen);
}
+ udplen = datalen + sizeof(struct udphdr);
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
- udph->len = htons(datalen + sizeof(struct udphdr));
- udph->check = 0; /* No checksum */
+ udph->len = htons(udplen);
+ udph->check = 0;
*(__be32 *) iph = htonl(0x60000000); /* Version + flow */
@@ -2842,18 +2938,33 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
iph->hop_limit = 32;
- iph->payload_len = htons(sizeof(struct udphdr) + datalen);
+ iph->payload_len = htons(udplen);
iph->nexthdr = IPPROTO_UDP;
iph->daddr = pkt_dev->cur_in6_daddr;
iph->saddr = pkt_dev->cur_in6_saddr;
- skb->mac_header = (skb->network_header - ETH_HLEN -
- pkt_dev->pkt_overhead);
skb->protocol = protocol;
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V6_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
@@ -2912,7 +3023,7 @@ static void pktgen_run(struct pktgen_thread *t)
t->control &= ~(T_STOP);
}
-static void pktgen_stop_all_threads_ifs(void)
+static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn)
{
struct pktgen_thread *t;
@@ -2920,7 +3031,7 @@ static void pktgen_stop_all_threads_ifs(void)
mutex_lock(&pktgen_thread_lock);
- list_for_each_entry(t, &pktgen_threads, th_list)
+ list_for_each_entry(t, &pn->pktgen_threads, th_list)
t->control |= T_STOP;
mutex_unlock(&pktgen_thread_lock);
@@ -2956,28 +3067,28 @@ signal:
return 0;
}
-static int pktgen_wait_all_threads_run(void)
+static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
{
struct pktgen_thread *t;
int sig = 1;
mutex_lock(&pktgen_thread_lock);
- list_for_each_entry(t, &pktgen_threads, th_list) {
+ list_for_each_entry(t, &pn->pktgen_threads, th_list) {
sig = pktgen_wait_thread_run(t);
if (sig == 0)
break;
}
if (sig == 0)
- list_for_each_entry(t, &pktgen_threads, th_list)
+ list_for_each_entry(t, &pn->pktgen_threads, th_list)
t->control |= (T_STOP);
mutex_unlock(&pktgen_thread_lock);
return sig;
}
-static void pktgen_run_all_threads(void)
+static void pktgen_run_all_threads(struct pktgen_net *pn)
{
struct pktgen_thread *t;
@@ -2985,7 +3096,7 @@ static void pktgen_run_all_threads(void)
mutex_lock(&pktgen_thread_lock);
- list_for_each_entry(t, &pktgen_threads, th_list)
+ list_for_each_entry(t, &pn->pktgen_threads, th_list)
t->control |= (T_RUN);
mutex_unlock(&pktgen_thread_lock);
@@ -2993,10 +3104,10 @@ static void pktgen_run_all_threads(void)
/* Propagate thread->control */
schedule_timeout_interruptible(msecs_to_jiffies(125));
- pktgen_wait_all_threads_run();
+ pktgen_wait_all_threads_run(pn);
}
-static void pktgen_reset_all_threads(void)
+static void pktgen_reset_all_threads(struct pktgen_net *pn)
{
struct pktgen_thread *t;
@@ -3004,7 +3115,7 @@ static void pktgen_reset_all_threads(void)
mutex_lock(&pktgen_thread_lock);
- list_for_each_entry(t, &pktgen_threads, th_list)
+ list_for_each_entry(t, &pn->pktgen_threads, th_list)
t->control |= (T_REMDEVALL);
mutex_unlock(&pktgen_thread_lock);
@@ -3012,7 +3123,7 @@ static void pktgen_reset_all_threads(void)
/* Propagate thread->control */
schedule_timeout_interruptible(msecs_to_jiffies(125));
- pktgen_wait_all_threads_run();
+ pktgen_wait_all_threads_run(pn);
}
static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
@@ -3154,9 +3265,7 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
static void pktgen_rem_thread(struct pktgen_thread *t)
{
/* Remove from the thread list */
-
- remove_proc_entry(t->tsk->comm, pg_proc_dir);
-
+ remove_proc_entry(t->tsk->comm, t->net->proc_dir);
}
static void pktgen_resched(struct pktgen_dev *pkt_dev)
@@ -3229,9 +3338,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
queue_map = skb_get_queue_mapping(pkt_dev->skb);
txq = netdev_get_tx_queue(odev, queue_map);
- __netif_tx_lock_bh(txq);
+ local_bh_disable();
+
+ HARD_TX_LOCK(odev, txq, smp_processor_id());
- if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
+ if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
ret = NETDEV_TX_BUSY;
pkt_dev->last_ok = 0;
goto unlock;
@@ -3265,7 +3376,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->last_ok = 0;
}
unlock:
- __netif_tx_unlock_bh(txq);
+ HARD_TX_UNLOCK(odev, txq);
+
+ local_bh_enable();
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
@@ -3302,7 +3415,7 @@ static int pktgen_thread_worker(void *arg)
pkt_dev = next_to_run(t);
if (unlikely(!pkt_dev && t->control == 0)) {
- if (pktgen_exiting)
+ if (t->net->pktgen_exiting)
break;
wait_event_interruptible_timeout(t->queue,
t->control != 0,
@@ -3424,7 +3537,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
/* We don't allow a device to be on several threads */
- pkt_dev = __pktgen_NN_threads(ifname, FIND);
+ pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
if (pkt_dev) {
pr_err("ERROR: interface already used\n");
return -EBUSY;
@@ -3459,13 +3572,13 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_id = 0xffff;
pkt_dev->node = -1;
- err = pktgen_setup_dev(pkt_dev, ifname);
+ err = pktgen_setup_dev(t->net, pkt_dev, ifname);
if (err)
goto out1;
if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
pkt_dev->clone_skb = pg_clone_skb_d;
- pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
+ pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
&pktgen_if_fops, pkt_dev);
if (!pkt_dev->entry) {
pr_err("cannot create %s/%s procfs entry\n",
@@ -3476,6 +3589,17 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
#ifdef CONFIG_XFRM
pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
pkt_dev->ipsproto = IPPROTO_ESP;
+
+ /* xfrm tunnel mode needs an additional dst to extract the outer
+ * ip header protocol/ttl/id fields, so create a phony one here
+ * instead of looking up a valid rt, which would definitely hurt
+ * performance in this case.
+ */
+ pkt_dev->dstops.family = AF_INET;
+ pkt_dev->dst.dev = pkt_dev->odev;
+ dst_init_metrics(&pkt_dev->dst, pktgen_dst_metrics, false);
+ pkt_dev->dst.child = &pkt_dev->dst;
+ pkt_dev->dst.ops = &pkt_dev->dstops;
#endif
return add_dev_to_thread(t, pkt_dev);
@@ -3490,7 +3614,7 @@ out1:
return err;
}
-static int __init pktgen_create_thread(int cpu)
+static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
{
struct pktgen_thread *t;
struct proc_dir_entry *pe;
@@ -3508,7 +3632,7 @@ static int __init pktgen_create_thread(int cpu)
INIT_LIST_HEAD(&t->if_list);
- list_add_tail(&t->th_list, &pktgen_threads);
+ list_add_tail(&t->th_list, &pn->pktgen_threads);
init_completion(&t->start_done);
p = kthread_create_on_node(pktgen_thread_worker,
@@ -3524,7 +3648,7 @@ static int __init pktgen_create_thread(int cpu)
kthread_bind(p, cpu);
t->tsk = p;
- pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir,
+ pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
&pktgen_thread_fops, t);
if (!pe) {
pr_err("cannot create %s/%s procfs entry\n",
@@ -3535,6 +3659,7 @@ static int __init pktgen_create_thread(int cpu)
return -EINVAL;
}
+ t->net = pn;
wake_up_process(p);
wait_for_completion(&t->start_done);
@@ -3560,7 +3685,6 @@ static void _rem_dev_from_if_list(struct pktgen_thread *t,
static int pktgen_remove_device(struct pktgen_thread *t,
struct pktgen_dev *pkt_dev)
{
-
pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
if (pkt_dev->running) {
@@ -3580,7 +3704,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
_rem_dev_from_if_list(t, pkt_dev);
if (pkt_dev->entry)
- remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
+ proc_remove(pkt_dev->entry);
#ifdef CONFIG_XFRM
free_SAs(pkt_dev);
@@ -3592,63 +3716,63 @@ static int pktgen_remove_device(struct pktgen_thread *t,
return 0;
}
-static int __init pg_init(void)
+static int __net_init pg_net_init(struct net *net)
{
- int cpu;
+ struct pktgen_net *pn = net_generic(net, pg_net_id);
struct proc_dir_entry *pe;
- int ret = 0;
-
- pr_info("%s", version);
-
- pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net);
- if (!pg_proc_dir)
+ int cpu, ret = 0;
+
+ pn->net = net;
+ INIT_LIST_HEAD(&pn->pktgen_threads);
+ pn->pktgen_exiting = false;
+ pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
+ if (!pn->proc_dir) {
+ pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
-
- pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
+ }
+ pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_fops);
if (pe == NULL) {
- pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
+ pr_err("cannot create %s procfs entry\n", PGCTRL);
ret = -EINVAL;
- goto remove_dir;
+ goto remove;
}
- register_netdevice_notifier(&pktgen_notifier_block);
-
for_each_online_cpu(cpu) {
int err;
- err = pktgen_create_thread(cpu);
+ err = pktgen_create_thread(cpu, pn);
if (err)
- pr_warning("WARNING: Cannot create thread for cpu %d (%d)\n",
+ pr_warn("Cannot create thread for cpu %d (%d)\n",
cpu, err);
}
- if (list_empty(&pktgen_threads)) {
- pr_err("ERROR: Initialization failed for all threads\n");
+ if (list_empty(&pn->pktgen_threads)) {
+ pr_err("Initialization failed for all threads\n");
ret = -ENODEV;
- goto unregister;
+ goto remove_entry;
}
return 0;
- unregister:
- unregister_netdevice_notifier(&pktgen_notifier_block);
- remove_proc_entry(PGCTRL, pg_proc_dir);
- remove_dir:
- proc_net_remove(&init_net, PG_PROC_DIR);
+remove_entry:
+ remove_proc_entry(PGCTRL, pn->proc_dir);
+remove:
+ remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
return ret;
}
-static void __exit pg_cleanup(void)
+static void __net_exit pg_net_exit(struct net *net)
{
+ struct pktgen_net *pn = net_generic(net, pg_net_id);
struct pktgen_thread *t;
struct list_head *q, *n;
LIST_HEAD(list);
/* Stop all interfaces & threads */
- pktgen_exiting = true;
+ pn->pktgen_exiting = true;
mutex_lock(&pktgen_thread_lock);
- list_splice_init(&pktgen_threads, &list);
+ list_splice_init(&pn->pktgen_threads, &list);
mutex_unlock(&pktgen_thread_lock);
list_for_each_safe(q, n, &list) {
@@ -3658,12 +3782,36 @@ static void __exit pg_cleanup(void)
kfree(t);
}
- /* Un-register us from receiving netdevice events */
- unregister_netdevice_notifier(&pktgen_notifier_block);
+ remove_proc_entry(PGCTRL, pn->proc_dir);
+ remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
+}
+
+static struct pernet_operations pg_net_ops = {
+ .init = pg_net_init,
+ .exit = pg_net_exit,
+ .id = &pg_net_id,
+ .size = sizeof(struct pktgen_net),
+};
+
+static int __init pg_init(void)
+{
+ int ret = 0;
- /* Clean up proc file system */
- remove_proc_entry(PGCTRL, pg_proc_dir);
- proc_net_remove(&init_net, PG_PROC_DIR);
+ pr_info("%s", version);
+ ret = register_pernet_subsys(&pg_net_ops);
+ if (ret)
+ return ret;
+ ret = register_netdevice_notifier(&pktgen_notifier_block);
+ if (ret)
+ unregister_pernet_subsys(&pg_net_ops);
+
+ return ret;
+}
+
+static void __exit pg_cleanup(void)
+{
+ unregister_netdevice_notifier(&pktgen_notifier_block);
+ unregister_pernet_subsys(&pg_net_ops);
}
module_init(pg_init);