Diffstat (limited to 'net/netfilter/nf_queue.c')
-rw-r--r--   net/netfilter/nf_queue.c | 419
1 file changed, 140 insertions, 279 deletions
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 0cef1433d66..5d24b1fdb59 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -1,4 +1,10 @@
+/*
+ * Rusty Russell (C)2000 -- This code is GPL.
+ * Patrick McHardy (c) 2006-2012
+ */
+
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
@@ -7,361 +13,216 @@
 #include <linux/seq_file.h>
 #include <linux/rcupdate.h>
 #include <net/protocol.h>
+#include <net/netfilter/nf_queue.h>
+#include <net/dst.h>

 #include "nf_internals.h"

 /*
- * A queue handler may be registered for each protocol. Each is protected by
- * long term mutex. The handler must provide an an outfn() to accept packets
- * for queueing and must reinject all packets it receives, no matter what.
+ * Hook for nfnetlink_queue to register its queue handler.
+ * We do this so that most of the NFQUEUE code can be modular.
+ *
+ * Once the queue is registered it must reinject all packets it
+ * receives, no matter what.
  */
-static struct nf_queue_handler *queue_handler[NPROTO];
-
-static DEFINE_MUTEX(queue_handler_mutex);
+static const struct nf_queue_handler __rcu *queue_handler __read_mostly;

 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
+void nf_register_queue_handler(const struct nf_queue_handler *qh)
 {
-        int ret;
-
-        if (pf >= NPROTO)
-                return -EINVAL;
-
-        mutex_lock(&queue_handler_mutex);
-        if (queue_handler[pf] == qh)
-                ret = -EEXIST;
-        else if (queue_handler[pf])
-                ret = -EBUSY;
-        else {
-                rcu_assign_pointer(queue_handler[pf], qh);
-                ret = 0;
-        }
-        mutex_unlock(&queue_handler_mutex);
-
-        return ret;
+        /* should never happen, we only have one queueing backend in kernel */
+        WARN_ON(rcu_access_pointer(queue_handler));
+        rcu_assign_pointer(queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);

 /* The caller must flush their queue before this */
-int nf_unregister_queue_handler(int pf, struct nf_queue_handler *qh)
+void nf_unregister_queue_handler(void)
 {
-        if (pf >= NPROTO)
-                return -EINVAL;
-
-        mutex_lock(&queue_handler_mutex);
-        if (queue_handler[pf] != qh) {
-                mutex_unlock(&queue_handler_mutex);
-                return -EINVAL;
-        }
-
-        rcu_assign_pointer(queue_handler[pf], NULL);
-        mutex_unlock(&queue_handler_mutex);
-
+        RCU_INIT_POINTER(queue_handler, NULL);
         synchronize_rcu();
-
-        return 0;
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);

-void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
+void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
-        int pf;
+        /* Release those devices we held, or Alexey will kill me. */
+        if (entry->indev)
+                dev_put(entry->indev);
+        if (entry->outdev)
+                dev_put(entry->outdev);
+#ifdef CONFIG_BRIDGE_NETFILTER
+        if (entry->skb->nf_bridge) {
+                struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

-        mutex_lock(&queue_handler_mutex);
-        for (pf = 0; pf < NPROTO; pf++) {
-                if (queue_handler[pf] == qh)
-                        rcu_assign_pointer(queue_handler[pf], NULL);
+                if (nf_bridge->physindev)
+                        dev_put(nf_bridge->physindev);
+                if (nf_bridge->physoutdev)
+                        dev_put(nf_bridge->physoutdev);
         }
-        mutex_unlock(&queue_handler_mutex);
+#endif
+        /* Drop reference to owner of hook which queued us. */
+        module_put(entry->elem->owner);
+}
+EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

-        synchronize_rcu();
+/* Bump dev refs so they don't vanish while packet is out */
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+{
+        if (!try_module_get(entry->elem->owner))
+                return false;
+
+        if (entry->indev)
+                dev_hold(entry->indev);
+        if (entry->outdev)
+                dev_hold(entry->outdev);
+#ifdef CONFIG_BRIDGE_NETFILTER
+        if (entry->skb->nf_bridge) {
+                struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
+                struct net_device *physdev;
+
+                physdev = nf_bridge->physindev;
+                if (physdev)
+                        dev_hold(physdev);
+                physdev = nf_bridge->physoutdev;
+                if (physdev)
+                        dev_hold(physdev);
+        }
+#endif
+
+        return true;
 }
-EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
+EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

 /*
  * Any packet that leaves via this function must come back
  * through nf_reinject().
  */
-static int __nf_queue(struct sk_buff *skb,
-                      struct list_head *elem,
-                      int pf, unsigned int hook,
+int nf_queue(struct sk_buff *skb,
+             struct nf_hook_ops *elem,
+             u_int8_t pf, unsigned int hook,
              struct net_device *indev,
              struct net_device *outdev,
              int (*okfn)(struct sk_buff *),
              unsigned int queuenum)
 {
-        int status;
-        struct nf_info *info;
-#ifdef CONFIG_BRIDGE_NETFILTER
-        struct net_device *physindev = NULL;
-        struct net_device *physoutdev = NULL;
-#endif
-        struct nf_afinfo *afinfo;
-        struct nf_queue_handler *qh;
+        int status = -ENOENT;
+        struct nf_queue_entry *entry = NULL;
+        const struct nf_afinfo *afinfo;
+        const struct nf_queue_handler *qh;

-        /* QUEUE == DROP if noone is waiting, to be safe. */
+        /* QUEUE == DROP if no one is waiting, to be safe. */
         rcu_read_lock();

-        qh = rcu_dereference(queue_handler[pf]);
+        qh = rcu_dereference(queue_handler);
         if (!qh) {
-                rcu_read_unlock();
-                kfree_skb(skb);
-                return 1;
+                status = -ESRCH;
+                goto err_unlock;
         }

         afinfo = nf_get_afinfo(pf);
-        if (!afinfo) {
-                rcu_read_unlock();
-                kfree_skb(skb);
-                return 1;
-        }
-
-        info = kmalloc(sizeof(*info) + afinfo->route_key_size, GFP_ATOMIC);
-        if (!info) {
-                if (net_ratelimit())
-                        printk(KERN_ERR "OOM queueing packet %p\n",
-                               skb);
-                rcu_read_unlock();
-                kfree_skb(skb);
-                return 1;
-        }
-
-        *info = (struct nf_info) {
-                (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
-
-        /* If it's going away, ignore hook. */
-        if (!try_module_get(info->elem->owner)) {
-                rcu_read_unlock();
-                kfree(info);
-                return 0;
-        }
-
-        /* Bump dev refs so they don't vanish while packet is out */
-        if (indev) dev_hold(indev);
-        if (outdev) dev_hold(outdev);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
-        if (skb->nf_bridge) {
-                physindev = skb->nf_bridge->physindev;
-                if (physindev) dev_hold(physindev);
-                physoutdev = skb->nf_bridge->physoutdev;
-                if (physoutdev) dev_hold(physoutdev);
-        }
-#endif
-        afinfo->saveroute(skb, info);
-        status = qh->outfn(skb, info, queuenum, qh->data);
+        if (!afinfo)
+                goto err_unlock;
+
+        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
+        if (!entry) {
+                status = -ENOMEM;
+                goto err_unlock;
+        }
+
+        *entry = (struct nf_queue_entry) {
+                .skb    = skb,
+                .elem   = elem,
+                .pf     = pf,
+                .hook   = hook,
+                .indev  = indev,
+                .outdev = outdev,
+                .okfn   = okfn,
+                .size   = sizeof(*entry) + afinfo->route_key_size,
+        };
+
+        if (!nf_queue_entry_get_refs(entry)) {
+                status = -ECANCELED;
+                goto err_unlock;
+        }
+        skb_dst_force(skb);
+        afinfo->saveroute(skb, entry);
+        status = qh->outfn(entry, queuenum);

         rcu_read_unlock();

         if (status < 0) {
-                /* James M doesn't say fuck enough. */
-                if (indev) dev_put(indev);
-                if (outdev) dev_put(outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-                if (physindev) dev_put(physindev);
-                if (physoutdev) dev_put(physoutdev);
-#endif
-                module_put(info->elem->owner);
-                kfree(info);
-                kfree_skb(skb);
-
-                return 1;
+                nf_queue_entry_release_refs(entry);
+                goto err;
         }

-        return 1;
-}
-
-int nf_queue(struct sk_buff *skb,
-             struct list_head *elem,
-             int pf, unsigned int hook,
-             struct net_device *indev,
-             struct net_device *outdev,
-             int (*okfn)(struct sk_buff *),
-             unsigned int queuenum)
-{
-        struct sk_buff *segs;
-
-        if (!skb_is_gso(skb))
-                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-                                  queuenum);
-
-        switch (pf) {
-        case AF_INET:
-                skb->protocol = htons(ETH_P_IP);
-                break;
-        case AF_INET6:
-                skb->protocol = htons(ETH_P_IPV6);
-                break;
-        }
-
-        segs = skb_gso_segment(skb, 0);
-        kfree_skb(skb);
-        if (unlikely(IS_ERR(segs)))
-                return 1;
-
-        do {
-                struct sk_buff *nskb = segs->next;
+        return 0;

-                segs->next = NULL;
-                if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-                                queuenum))
-                        kfree_skb(segs);
-                segs = nskb;
-        } while (segs);
-        return 1;
+err_unlock:
+        rcu_read_unlock();
+err:
+        kfree(entry);
+        return status;
 }

-void nf_reinject(struct sk_buff *skb, struct nf_info *info,
-                 unsigned int verdict)
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
-        struct list_head *elem = &info->elem->list;
-        struct list_head *i;
-        struct nf_afinfo *afinfo;
+        struct sk_buff *skb = entry->skb;
+        struct nf_hook_ops *elem = entry->elem;
+        const struct nf_afinfo *afinfo;
+        int err;

         rcu_read_lock();

-        /* Release those devices we held, or Alexey will kill me. */
-        if (info->indev) dev_put(info->indev);
-        if (info->outdev) dev_put(info->outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-        if (skb->nf_bridge) {
-                if (skb->nf_bridge->physindev)
-                        dev_put(skb->nf_bridge->physindev);
-                if (skb->nf_bridge->physoutdev)
-                        dev_put(skb->nf_bridge->physoutdev);
-        }
-#endif
-
-        /* Drop reference to owner of hook which queued us. */
-        module_put(info->elem->owner);
-
-        list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
-                if (i == elem)
-                        break;
-        }
-
-        if (i == &nf_hooks[info->pf][info->hook]) {
-                /* The module which sent it to userspace is gone. */
-                NFDEBUG("%s: module disappeared, dropping packet.\n",
-                        __FUNCTION__);
-                verdict = NF_DROP;
-        }
+        nf_queue_entry_release_refs(entry);

         /* Continue traversal iff userspace said ok... */
         if (verdict == NF_REPEAT) {
-                elem = elem->prev;
+                elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
                 verdict = NF_ACCEPT;
         }

         if (verdict == NF_ACCEPT) {
-                afinfo = nf_get_afinfo(info->pf);
-                if (!afinfo || afinfo->reroute(skb, info) < 0)
+                afinfo = nf_get_afinfo(entry->pf);
+                if (!afinfo || afinfo->reroute(skb, entry) < 0)
                         verdict = NF_DROP;
         }

         if (verdict == NF_ACCEPT) {
         next_hook:
-                verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
-                                     skb, info->hook,
-                                     info->indev, info->outdev, &elem,
-                                     info->okfn, INT_MIN);
+                verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
+                                     skb, entry->hook,
+                                     entry->indev, entry->outdev, &elem,
+                                     entry->okfn, INT_MIN);
         }

         switch (verdict & NF_VERDICT_MASK) {
         case NF_ACCEPT:
         case NF_STOP:
-                info->okfn(skb);
-        case NF_STOLEN:
+                local_bh_disable();
+                entry->okfn(skb);
+                local_bh_enable();
                 break;
         case NF_QUEUE:
-                if (!__nf_queue(skb, elem, info->pf, info->hook,
-                                info->indev, info->outdev, info->okfn,
-                                verdict >> NF_VERDICT_BITS))
-                        goto next_hook;
+                err = nf_queue(skb, elem, entry->pf, entry->hook,
+                               entry->indev, entry->outdev, entry->okfn,
+                               verdict >> NF_VERDICT_QBITS);
+                if (err < 0) {
+                        if (err == -ECANCELED)
+                                goto next_hook;
+                        if (err == -ESRCH &&
+                           (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+                                goto next_hook;
+                        kfree_skb(skb);
+                }
+                break;
+        case NF_STOLEN:
                 break;
         default:
                 kfree_skb(skb);
         }
         rcu_read_unlock();
-        kfree(info);
-        return;
+        kfree(entry);
 }
 EXPORT_SYMBOL(nf_reinject);
-
-#ifdef CONFIG_PROC_FS
-static void *seq_start(struct seq_file *seq, loff_t *pos)
-{
-        if (*pos >= NPROTO)
-                return NULL;
-
-        return pos;
-}
-
-static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-        (*pos)++;
-
-        if (*pos >= NPROTO)
-                return NULL;
-
-        return pos;
-}
-
-static void seq_stop(struct seq_file *s, void *v)
-{
-
-}
-
-static int seq_show(struct seq_file *s, void *v)
-{
-        int ret;
-        loff_t *pos = v;
-        struct nf_queue_handler *qh;
-
-        rcu_read_lock();
-        qh = rcu_dereference(queue_handler[*pos]);
-        if (!qh)
-                ret = seq_printf(s, "%2lld NONE\n", *pos);
-        else
-                ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
-        rcu_read_unlock();
-
-        return ret;
-}
-
-static const struct seq_operations nfqueue_seq_ops = {
-        .start  = seq_start,
-        .next   = seq_next,
-        .stop   = seq_stop,
-        .show   = seq_show,
-};
-
-static int nfqueue_open(struct inode *inode, struct file *file)
-{
-        return seq_open(file, &nfqueue_seq_ops);
-}
-
-static const struct file_operations nfqueue_file_ops = {
-        .owner   = THIS_MODULE,
-        .open    = nfqueue_open,
-        .read    = seq_read,
-        .llseek  = seq_lseek,
-        .release = seq_release,
-};
-#endif /* PROC_FS */
-
-
-int __init netfilter_queue_init(void)
-{
-#ifdef CONFIG_PROC_FS
-        struct proc_dir_entry *pde;
-
-        pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
-        if (!pde)
-                return -1;
-        pde->proc_fops = &nfqueue_file_ops;
-#endif
-        return 0;
-}
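For reference, the registration side of the new API is small: a backend supplies one struct nf_queue_handler whose outfn() takes ownership of the nf_queue_entry and must eventually hand it back through nf_reinject(). The sketch below is hypothetical (toy_outfn/toy_handler are made-up names; this is not the real nfnetlink_queue backend). It "queues" nothing and reinjects every packet immediately, purely to illustrate the calls exported by this file.

/*
 * Hypothetical, minimal queue backend sketch.  A real backend would stash
 * the entry, hand the packet to userspace and call nf_reinject() later
 * with the verdict it gets back; reinjecting straight from outfn() is
 * for illustration only.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

static int toy_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* Returning 0 means we took ownership of the entry; a negative
         * return would make nf_queue() drop the skb instead. */
        nf_reinject(entry, NF_ACCEPT);
        return 0;
}

static const struct nf_queue_handler toy_handler = {
        .outfn = toy_outfn,
};

static int __init toy_init(void)
{
        /* Only one backend may be registered at a time (see the WARN_ON
         * in nf_register_queue_handler() above). */
        nf_register_queue_handler(&toy_handler);
        return 0;
}

static void __exit toy_exit(void)
{
        /* A real backend must flush its queue before unregistering;
         * this sketch never holds on to entries. */
        nf_unregister_queue_handler();
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

When queueing fails, nf_queue() now reports the error upwards instead of silently dropping: -ESRCH (no handler registered) lets the packet continue down the hook chain if the rule carries NF_VERDICT_FLAG_QUEUE_BYPASS, -ECANCELED (hook module going away) moves on to the next hook, and any other error frees the skb in nf_reinject().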
