author    Eric Dumazet <eric.dumazet@gmail.com>  2010-05-31 16:41:35 +0200
committer Patrick McHardy <kaber@trash.net>      2010-05-31 16:41:35 +0200
commit    7489aec8eed4f2f1eb3b4d35763bd3ea30b32ef5 (patch)
tree      fe2450679dc217183421e606b3912641545596bd /include
parent    c936e8bd1de2fa50c49e3df6fa5036bf07870b67 (diff)
netfilter: xtables: stackptr should be percpu
commit f3c5c1bfd4 (netfilter: xtables: make ip_tables reentrant) introduced a
performance regression, because the stackptr array is shared by all cpus,
adding cache line ping pongs (16 cpus share a 64-byte cache line).

Fix this using alloc_percpu().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-By: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
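For context, the difference between the two layouts can be sketched as below.
This is a hypothetical illustration, not code from the patch: the names
demo_info and demo_alloc are made up, and only the allocation pattern follows
what the commit message describes (a shared cpu-indexed array versus
alloc_percpu()).

/* Hypothetical sketch, not the actual x_tables code: contrast the old
 * shared array (one slot per cpu, all slots in the same cache line)
 * with a true per-cpu allocation. */
#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_info {
	unsigned int *stackptr_shared;		/* old: one array, indexed by cpu */
	unsigned int __percpu *stackptr_pcpu;	/* new: one copy per cpu */
};

static int demo_alloc(struct demo_info *info)
{
	/* Old layout: 16 cpus * 4 bytes = 64 bytes, i.e. one cache line
	 * shared by every cpu; each update bounces that line between cores. */
	info->stackptr_shared = kcalloc(nr_cpu_ids, sizeof(unsigned int),
					GFP_KERNEL);
	if (!info->stackptr_shared)
		return -ENOMEM;

	/* New layout: each cpu's counter lives in its own per-cpu area,
	 * so updates never contend on a shared cache line. */
	info->stackptr_pcpu = alloc_percpu(unsigned int);
	if (!info->stackptr_pcpu) {
		kfree(info->stackptr_shared);
		return -ENOMEM;
	}
	return 0;
}

static void demo_free(struct demo_info *info)
{
	free_percpu(info->stackptr_pcpu);
	kfree(info->stackptr_shared);
}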
Diffstat (limited to 'include')
-rw-r--r--  include/linux/netfilter/x_tables.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c00cc0c4d0b..24e5d01d27d 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -397,7 +397,7 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int *stackptr;
+ unsigned int __percpu *stackptr;
void ***jumpstack;
/* ipt_entry tables: one per CPU */
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
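On the hot path, the per-cpu field is then reached through the usual per-cpu
accessors. A minimal usage sketch follows, assuming the caller runs with
preemption disabled (as netfilter hooks do); the function demo_do_table is
hypothetical and only the accessor pattern is implied by the new field type.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/netfilter/x_tables.h>

/* Hypothetical illustration of dereferencing the per-cpu stackptr;
 * not taken verbatim from the patch. */
static void demo_do_table(struct xt_table_info *private)
{
	unsigned int *stackptr;

	/* Each cpu resolves its own copy; no cache line is shared. */
	stackptr = per_cpu_ptr(private->stackptr, smp_processor_id());

	*stackptr = 0;	/* reset the jump-stack depth for this lookup */
}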