Diffstat (limited to 'net/core/pktgen.c')
-rw-r--r--  net/core/pktgen.c  215
1 files changed, 111 insertions, 104 deletions
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0bcecbf0658..6e79e96cb4f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -192,11 +192,10 @@
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
/* Thread control flag bits */
-#define T_TERMINATE (1<<0)
-#define T_STOP (1<<1) /* Stop run */
-#define T_RUN (1<<2) /* Start run */
-#define T_REMDEVALL (1<<3) /* Remove all devs */
-#define T_REMDEV (1<<4) /* Remove one dev */
+#define T_STOP (1<<0) /* Stop run */
+#define T_RUN (1<<1) /* Start run */
+#define T_REMDEVALL (1<<2) /* Remove all devs */
+#define T_REMDEV (1<<3) /* Remove one dev */
/* If lock -- can be removed after some work */
#define if_lock(t) spin_lock(&(t->if_lock));
@@ -336,6 +335,7 @@ struct pktgen_dev {
__u32 cur_src_mac_offset;
__be32 cur_saddr;
__be32 cur_daddr;
+ __u16 ip_id;
__u16 cur_udp_dst;
__u16 cur_udp_src;
__u16 cur_queue_map;
@@ -363,6 +363,7 @@ struct pktgen_dev {
* device name (not when the inject is
* started as it used to do.)
*/
+ char odevname[32];
struct flow_state *flows;
unsigned cflows; /* Concurrent flows (config) */
unsigned lflow; /* Flow length (config) */
@@ -426,7 +427,7 @@ static const char version[] =
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
- const char *ifname);
+ const char *ifname, bool exact);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
static void pktgen_reset_all_threads(void);
@@ -528,7 +529,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_printf(seq,
" frags: %d delay: %llu clone_skb: %d ifname: %s\n",
pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
- pkt_dev->clone_skb, pkt_dev->odev->name);
+ pkt_dev->clone_skb, pkt_dev->odevname);
seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
pkt_dev->lflow);
@@ -965,7 +966,7 @@ static ssize_t pktgen_if_write(struct file *file,
if (value == 0x7FFFFFFF)
pkt_dev->delay = ULLONG_MAX;
else
- pkt_dev->delay = (u64)value * NSEC_PER_USEC;
+ pkt_dev->delay = (u64)value;
sprintf(pg_result, "OK: delay=%llu",
(unsigned long long) pkt_dev->delay);
@@ -1688,13 +1689,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list)
if (pkt_dev->running)
- seq_printf(seq, "%s ", pkt_dev->odev->name);
+ seq_printf(seq, "%s ", pkt_dev->odevname);
seq_printf(seq, "\nStopped: ");
list_for_each_entry(pkt_dev, &t->if_list, list)
if (!pkt_dev->running)
- seq_printf(seq, "%s ", pkt_dev->odev->name);
+ seq_printf(seq, "%s ", pkt_dev->odevname);
if (t->result[0])
seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1817,9 +1818,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
{
struct pktgen_thread *t;
struct pktgen_dev *pkt_dev = NULL;
+ bool exact = (remove == FIND);
list_for_each_entry(t, &pktgen_threads, th_list) {
- pkt_dev = pktgen_find_dev(t, ifname);
+ pkt_dev = pktgen_find_dev(t, ifname, exact);
if (pkt_dev) {
if (remove) {
if_lock(t);
@@ -1994,7 +1996,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
"queue_map_min (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odev->name);
+ pkt_dev->odevname);
pkt_dev->queue_map_min = ntxq - 1;
}
if (pkt_dev->queue_map_max >= ntxq) {
@@ -2002,7 +2004,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
"queue_map_max (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odev->name);
+ pkt_dev->odevname);
pkt_dev->queue_map_max = ntxq - 1;
}
@@ -2105,18 +2107,20 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
- ktime_t start;
- s32 remaining;
+ ktime_t start_time, end_time;
+ s64 remaining;
struct hrtimer_sleeper t;
hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_set_expires(&t.timer, spin_until);
remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
- if (remaining <= 0)
+ if (remaining <= 0) {
+ pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
return;
+ }
- start = ktime_now();
+ start_time = ktime_now();
if (remaining < 100)
udelay(remaining); /* really small just spin */
else {
@@ -2135,7 +2139,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
} while (t.task && pkt_dev->running && !signal_pending(current));
__set_current_state(TASK_RUNNING);
}
- pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+ end_time = ktime_now();
+
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+ pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -2208,7 +2215,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
if (pkt_dev->flags & F_QUEUE_MAP_CPU)
pkt_dev->cur_queue_map = smp_processor_id();
- else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+ else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
t = random32() %
@@ -2626,6 +2633,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->protocol = IPPROTO_UDP; /* UDP */
iph->saddr = pkt_dev->cur_saddr;
iph->daddr = pkt_dev->cur_daddr;
+ iph->id = htons(pkt_dev->ip_id);
+ pkt_dev->ip_id++;
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
@@ -2637,24 +2646,26 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
- if (pkt_dev->nfrags <= 0)
+ if (pkt_dev->nfrags <= 0) {
pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- else {
+ memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
+ } else {
int frags = pkt_dev->nfrags;
- int i;
+ int i, len;
pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
if (frags > MAX_SKB_FRAGS)
frags = MAX_SKB_FRAGS;
if (datalen > frags * PAGE_SIZE) {
- skb_put(skb, datalen - frags * PAGE_SIZE);
+ len = datalen - frags * PAGE_SIZE;
+ memset(skb_put(skb, len), 0, len);
datalen = frags * PAGE_SIZE;
}
i = 0;
while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL, 0);
+ struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
skb_shinfo(skb)->frags[i].page = page;
skb_shinfo(skb)->frags[i].page_offset = 0;
skb_shinfo(skb)->frags[i].size =
@@ -3253,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
if (!pkt_dev->running) {
printk(KERN_WARNING "pktgen: interface: %s is already "
- "stopped\n", pkt_dev->odev->name);
+ "stopped\n", pkt_dev->odevname);
return -EINVAL;
}
@@ -3365,19 +3376,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
mutex_unlock(&pktgen_thread_lock);
}
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
{
ktime_t idle_start = ktime_now();
+ schedule();
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
- if (need_resched())
- schedule();
- else
- cpu_relax();
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+ ktime_t idle_start = ktime_now();
+ while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+ if (signal_pending(current))
+ break;
+
+ if (need_resched())
+ pktgen_resched(pkt_dev);
+ else
+ cpu_relax();
+ }
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
}
-
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
@@ -3387,36 +3408,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
u16 queue_map;
int ret;
- if (pkt_dev->delay) {
- spin(pkt_dev, pkt_dev->next_tx);
-
- /* This is max DELAY, this has special meaning of
- * "never transmit"
- */
- if (pkt_dev->delay == ULLONG_MAX) {
- pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
- return;
- }
- }
-
- if (!pkt_dev->skb) {
- set_cur_queue_map(pkt_dev);
- queue_map = pkt_dev->cur_queue_map;
- } else {
- queue_map = skb_get_queue_mapping(pkt_dev->skb);
+ /* If device is offline, then don't send */
+ if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+ pktgen_stop_device(pkt_dev);
+ return;
}
- txq = netdev_get_tx_queue(odev, queue_map);
- /* Did we saturate the queue already? */
- if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
- /* If device is down, then all queues are permnantly frozen */
- if (netif_running(odev))
- idle(pkt_dev);
- else
- pktgen_stop_device(pkt_dev);
+ /* This is max DELAY, this has special meaning of
+ * "never transmit"
+ */
+ if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+ pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
return;
}
+ /* If no skb or clone count exhausted then get new one */
if (!pkt_dev->skb || (pkt_dev->last_ok &&
++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
/* build a new pkt */
@@ -3435,54 +3441,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->clone_count = 0; /* reset counter */
}
- /* fill_packet() might have changed the queue */
+ if (pkt_dev->delay && pkt_dev->last_ok)
+ spin(pkt_dev, pkt_dev->next_tx);
+
queue_map = skb_get_queue_mapping(pkt_dev->skb);
txq = netdev_get_tx_queue(odev, queue_map);
__netif_tx_lock_bh(txq);
- if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
- pkt_dev->last_ok = 0;
- else {
- atomic_inc(&(pkt_dev->skb->users));
+ atomic_inc(&(pkt_dev->skb->users));
- retry_now:
+ if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+ ret = NETDEV_TX_BUSY;
+ else
ret = (*xmit)(pkt_dev->skb, odev);
- switch (ret) {
- case NETDEV_TX_OK:
- txq_trans_update(txq);
- pkt_dev->last_ok = 1;
- pkt_dev->sofar++;
- pkt_dev->seq_num++;
- pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
- break;
- case NETDEV_TX_LOCKED:
- cpu_relax();
- goto retry_now;
- default: /* Drivers are not supposed to return other values! */
- if (net_ratelimit())
- pr_info("pktgen: %s xmit error: %d\n",
- odev->name, ret);
- pkt_dev->errors++;
- /* fallthru */
- case NETDEV_TX_BUSY:
- /* Retry it next time */
- atomic_dec(&(pkt_dev->skb->users));
- pkt_dev->last_ok = 0;
- }
-
- if (pkt_dev->delay)
- pkt_dev->next_tx = ktime_add_ns(ktime_now(),
- pkt_dev->delay);
+
+ switch (ret) {
+ case NETDEV_TX_OK:
+ txq_trans_update(txq);
+ pkt_dev->last_ok = 1;
+ pkt_dev->sofar++;
+ pkt_dev->seq_num++;
+ pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+ break;
+ default: /* Drivers are not supposed to return other values! */
+ if (net_ratelimit())
+ pr_info("pktgen: %s xmit error: %d\n",
+ pkt_dev->odevname, ret);
+ pkt_dev->errors++;
+ /* fallthru */
+ case NETDEV_TX_LOCKED:
+ case NETDEV_TX_BUSY:
+ /* Retry it next time */
+ atomic_dec(&(pkt_dev->skb->users));
+ pkt_dev->last_ok = 0;
}
__netif_tx_unlock_bh(txq);
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
- while (atomic_read(&(pkt_dev->skb->users)) != 1) {
- if (signal_pending(current))
- break;
- idle(pkt_dev);
- }
+ pktgen_wait_for_skb(pkt_dev);
/* Done with this */
pktgen_stop_device(pkt_dev);
@@ -3515,20 +3512,24 @@ static int pktgen_thread_worker(void *arg)
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
- if (!pkt_dev &&
- (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
- == 0) {
- prepare_to_wait(&(t->queue), &wait,
- TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- finish_wait(&(t->queue), &wait);
+ if (unlikely(!pkt_dev && t->control == 0)) {
+ wait_event_interruptible_timeout(t->queue,
+ t->control != 0,
+ HZ/10);
+ continue;
}
__set_current_state(TASK_RUNNING);
- if (pkt_dev)
+ if (likely(pkt_dev)) {
pktgen_xmit(pkt_dev);
+ if (need_resched())
+ pktgen_resched(pkt_dev);
+ else
+ cpu_relax();
+ }
+
if (t->control & T_STOP) {
pktgen_stop(t);
t->control &= ~(T_STOP);
@@ -3567,13 +3568,18 @@ static int pktgen_thread_worker(void *arg)
}
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
- const char *ifname)
+ const char *ifname, bool exact)
{
struct pktgen_dev *p, *pkt_dev = NULL;
- if_lock(t);
+ size_t len = strlen(ifname);
+ if_lock(t);
list_for_each_entry(p, &t->if_list, list)
- if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) {
+ if (strncmp(p->odevname, ifname, len) == 0) {
+ if (p->odevname[len]) {
+ if (exact || p->odevname[len] != '@')
+ continue;
+ }
pkt_dev = p;
break;
}
@@ -3629,6 +3635,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
if (!pkt_dev)
return -ENOMEM;
+ strcpy(pkt_dev->odevname, ifname);
pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state));
if (pkt_dev->flows == NULL) {
kfree(pkt_dev);
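
For context on the interface this diff touches, below is a minimal userspace sketch (not part of the patch) of how pktgen is typically driven through /proc/net/pktgen. It assumes the pktgen module is loaded, a kpktgend_0 thread exists, and an interface named eth0 is up; the "eth0@0" device name relies on the odevname/'@' suffix matching added in pktgen_find_dev() above, and the "delay" value is written in nanoseconds per the new delay handling. The pgset() helper and the parameter values are illustrative only.

/*
 * Hedged illustration: drive /proc/net/pktgen from userspace.
 * Paths and parameters are examples, not taken from this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void pgset(const char *path, const char *cmd)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror(cmd);
	close(fd);
}

int main(void)
{
	/* Bind a pktgen device to eth0; the '@0' suffix lets several
	 * pktgen devices share one real interface. */
	pgset("/proc/net/pktgen/kpktgend_0", "add_device eth0@0\n");

	/* Per-device parameters: packet count, size, destination and
	 * inter-packet delay (nanoseconds). */
	pgset("/proc/net/pktgen/eth0@0", "count 100000\n");
	pgset("/proc/net/pktgen/eth0@0", "pkt_size 300\n");
	pgset("/proc/net/pktgen/eth0@0", "dst 192.168.0.2\n");
	pgset("/proc/net/pktgen/eth0@0", "delay 10000\n");

	/* Start all pktgen threads; the write blocks until the run ends. */
	pgset("/proc/net/pktgen/pgctrl", "start\n");
	return 0;
}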