Diffstat (limited to 'net/netfilter/nf_queue.c')
-rw-r--r--   net/netfilter/nf_queue.c   284
1 file changed, 58 insertions, 226 deletions
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b3a7db678b8..5d24b1fdb59 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -1,3 +1,8 @@
+/*
+ * Rusty Russell (C)2000 -- This code is GPL.
+ * Patrick McHardy (c) 2006-2012
+ */
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -14,85 +19,33 @@
 #include "nf_internals.h"
 
 /*
- * A queue handler may be registered for each protocol.  Each is protected by
- * long term mutex.  The handler must provide an an outfn() to accept packets
- * for queueing and must reinject all packets it receives, no matter what.
+ * Hook for nfnetlink_queue to register its queue handler.
+ * We do this so that most of the NFQUEUE code can be modular.
+ *
+ * Once the queue is registered it must reinject all packets it
+ * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
-
-static DEFINE_MUTEX(queue_handler_mutex);
+static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
+void nf_register_queue_handler(const struct nf_queue_handler *qh)
 {
-	int ret;
-	const struct nf_queue_handler *old;
-
-	if (pf >= ARRAY_SIZE(queue_handler))
-		return -EINVAL;
-
-	mutex_lock(&queue_handler_mutex);
-	old = rcu_dereference_protected(queue_handler[pf],
-					lockdep_is_held(&queue_handler_mutex));
-	if (old == qh)
-		ret = -EEXIST;
-	else if (old)
-		ret = -EBUSY;
-	else {
-		rcu_assign_pointer(queue_handler[pf], qh);
-		ret = 0;
-	}
-	mutex_unlock(&queue_handler_mutex);
-
-	return ret;
+	/* should never happen, we only have one queueing backend in kernel */
+	WARN_ON(rcu_access_pointer(queue_handler));
+	rcu_assign_pointer(queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
+void nf_unregister_queue_handler(void)
 {
-	const struct nf_queue_handler *old;
-
-	if (pf >= ARRAY_SIZE(queue_handler))
-		return -EINVAL;
-
-	mutex_lock(&queue_handler_mutex);
-	old = rcu_dereference_protected(queue_handler[pf],
-					lockdep_is_held(&queue_handler_mutex));
-	if (old && old != qh) {
-		mutex_unlock(&queue_handler_mutex);
-		return -EINVAL;
-	}
-
-	RCU_INIT_POINTER(queue_handler[pf], NULL);
-	mutex_unlock(&queue_handler_mutex);
-
+	RCU_INIT_POINTER(queue_handler, NULL);
 	synchronize_rcu();
-
-	return 0;
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
-void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
-{
-	u_int8_t pf;
-
-	mutex_lock(&queue_handler_mutex);
-	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
-		if (rcu_dereference_protected(
-				queue_handler[pf],
-				lockdep_is_held(&queue_handler_mutex)
-				) == qh)
-			RCU_INIT_POINTER(queue_handler[pf], NULL);
-	}
-	mutex_unlock(&queue_handler_mutex);
-
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
-
-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
 	/* Release those devices we held, or Alexey will kill me. */
 	if (entry->indev)
@@ -112,13 +65,42 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 	/* Drop reference to owner of hook which queued us. */
 	module_put(entry->elem->owner);
 }
+EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
+
+/* Bump dev refs so they don't vanish while packet is out */
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+{
+	if (!try_module_get(entry->elem->owner))
+		return false;
+
+	if (entry->indev)
+		dev_hold(entry->indev);
+	if (entry->outdev)
+		dev_hold(entry->outdev);
+#ifdef CONFIG_BRIDGE_NETFILTER
+	if (entry->skb->nf_bridge) {
+		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
+		struct net_device *physdev;
+
+		physdev = nf_bridge->physindev;
+		if (physdev)
+			dev_hold(physdev);
+		physdev = nf_bridge->physoutdev;
+		if (physdev)
+			dev_hold(physdev);
+	}
+#endif
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
 /*
  * Any packet that leaves via this function must come back
  * through nf_reinject().
  */
-static int __nf_queue(struct sk_buff *skb,
-		      struct list_head *elem,
+int nf_queue(struct sk_buff *skb,
+	     struct nf_hook_ops *elem,
 	     u_int8_t pf, unsigned int hook,
 	     struct net_device *indev,
 	     struct net_device *outdev,
@@ -127,17 +109,13 @@ static int __nf_queue(struct sk_buff *skb,
 {
 	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
-#ifdef CONFIG_BRIDGE_NETFILTER
-	struct net_device *physindev;
-	struct net_device *physoutdev;
-#endif
 	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	rcu_read_lock();
 
-	qh = rcu_dereference(queue_handler[pf]);
+	qh = rcu_dereference(queue_handler);
 	if (!qh) {
 		status = -ESRCH;
 		goto err_unlock;
@@ -155,34 +133,19 @@ static int __nf_queue(struct sk_buff *skb,
 
 	*entry = (struct nf_queue_entry) {
 		.skb	= skb,
-		.elem	= list_entry(elem, struct nf_hook_ops, list),
+		.elem	= elem,
 		.pf	= pf,
 		.hook	= hook,
 		.indev	= indev,
 		.outdev	= outdev,
 		.okfn	= okfn,
+		.size	= sizeof(*entry) + afinfo->route_key_size,
 	};
 
-	/* If it's going away, ignore hook. */
-	if (!try_module_get(entry->elem->owner)) {
+	if (!nf_queue_entry_get_refs(entry)) {
 		status = -ECANCELED;
 		goto err_unlock;
 	}
-	/* Bump dev refs so they don't vanish while packet is out */
-	if (indev)
-		dev_hold(indev);
-	if (outdev)
-		dev_hold(outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-	if (skb->nf_bridge) {
-		physindev = skb->nf_bridge->physindev;
-		if (physindev)
-			dev_hold(physindev);
-		physoutdev = skb->nf_bridge->physoutdev;
-		if (physoutdev)
-			dev_hold(physoutdev);
-	}
-#endif
 	skb_dst_force(skb);
 	afinfo->saveroute(skb, entry);
 	status = qh->outfn(entry, queuenum);
@@ -203,67 +166,10 @@ err:
 	return status;
 }
 
-int nf_queue(struct sk_buff *skb,
-	     struct list_head *elem,
-	     u_int8_t pf, unsigned int hook,
-	     struct net_device *indev,
-	     struct net_device *outdev,
-	     int (*okfn)(struct sk_buff *),
-	     unsigned int queuenum)
-{
-	struct sk_buff *segs;
-	int err;
-	unsigned int queued;
-
-	if (!skb_is_gso(skb))
-		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-				  queuenum);
-
-	switch (pf) {
-	case NFPROTO_IPV4:
-		skb->protocol = htons(ETH_P_IP);
-		break;
-	case NFPROTO_IPV6:
-		skb->protocol = htons(ETH_P_IPV6);
-		break;
-	}
-
-	segs = skb_gso_segment(skb, 0);
-	/* Does not use PTR_ERR to limit the number of error codes that can be
-	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
-	 * 'ignore this hook'.
-	 */
-	if (IS_ERR(segs))
-		return -EINVAL;
-
-	queued = 0;
-	err = 0;
-	do {
-		struct sk_buff *nskb = segs->next;
-
-		segs->next = NULL;
-		if (err == 0)
-			err = __nf_queue(segs, elem, pf, hook, indev,
-					 outdev, okfn, queuenum);
-		if (err == 0)
-			queued++;
-		else
-			kfree_skb(segs);
-		segs = nskb;
-	} while (segs);
-
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
-		kfree_skb(skb);
-	return err;
-}
-
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
 	struct sk_buff *skb = entry->skb;
-	struct list_head *elem = &entry->elem->list;
+	struct nf_hook_ops *elem = entry->elem;
 	const struct nf_afinfo *afinfo;
 	int err;
 
@@ -273,7 +179,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
 	/* Continue traversal iff userspace said ok... */
 	if (verdict == NF_REPEAT) {
-		elem = elem->prev;
+		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
 		verdict = NF_ACCEPT;
 	}
 
@@ -299,9 +205,9 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		local_bh_enable();
 		break;
 	case NF_QUEUE:
-		err = __nf_queue(skb, elem, entry->pf, entry->hook,
-				 entry->indev, entry->outdev, entry->okfn,
-				 verdict >> NF_VERDICT_QBITS);
+		err = nf_queue(skb, elem, entry->pf, entry->hook,
+			       entry->indev, entry->outdev, entry->okfn,
+			       verdict >> NF_VERDICT_QBITS);
 		if (err < 0) {
 			if (err == -ECANCELED)
 				goto next_hook;
@@ -320,77 +226,3 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 	kfree(entry);
 }
 EXPORT_SYMBOL(nf_reinject);
-
-#ifdef CONFIG_PROC_FS
-static void *seq_start(struct seq_file *seq, loff_t *pos)
-{
-	if (*pos >= ARRAY_SIZE(queue_handler))
-		return NULL;
-
-	return pos;
-}
-
-static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-	(*pos)++;
-
-	if (*pos >= ARRAY_SIZE(queue_handler))
-		return NULL;
-
-	return pos;
-}
-
-static void seq_stop(struct seq_file *s, void *v)
-{
-
-}
-
-static int seq_show(struct seq_file *s, void *v)
-{
-	int ret;
-	loff_t *pos = v;
-	const struct nf_queue_handler *qh;
-
-	rcu_read_lock();
-	qh = rcu_dereference(queue_handler[*pos]);
-	if (!qh)
-		ret = seq_printf(s, "%2lld NONE\n", *pos);
-	else
-		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
-	rcu_read_unlock();
-
-	return ret;
-}
-
-static const struct seq_operations nfqueue_seq_ops = {
-	.start	= seq_start,
-	.next	= seq_next,
-	.stop	= seq_stop,
-	.show	= seq_show,
-};
-
-static int nfqueue_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &nfqueue_seq_ops);
-}
-
-static const struct file_operations nfqueue_file_ops = {
-	.owner	 = THIS_MODULE,
-	.open	 = nfqueue_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-#endif /* PROC_FS */
-
-
-int __init netfilter_queue_init(void)
-{
-#ifdef CONFIG_PROC_FS
-	if (!proc_create("nf_queue", S_IRUGO,
-			 proc_net_netfilter, &nfqueue_file_ops))
-		return -1;
-#endif
-	return 0;
-}
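For reference, here is how a queueing backend uses the new single-handler API. This is a minimal sketch of a hypothetical module (the example_* names are invented; the only real in-tree user is nfnetlink_queue), assuming the header path and the struct nf_queue_handler layout of this tree, where the handler carries just the outfn callback:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

/* Trivial pass-through backend: reinject every entry immediately
 * with NF_ACCEPT.  A real backend would hold the entry, ask
 * userspace for a verdict and call nf_reinject() later.  A negative
 * return from outfn makes nf_queue() release the entry's references,
 * free it and report the error to the caller. */
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static const struct nf_queue_handler example_qh = {
	.outfn	= example_outfn,
};

static int __init example_init(void)
{
	/* Only one backend may exist at a time; a second registration
	 * would trip the WARN_ON() added above. */
	nf_register_queue_handler(&example_qh);
	return 0;
}

static void __exit example_exit(void)
{
	/* Flush or reinject anything still queued before this call;
	 * it clears the pointer and waits out RCU readers. */
	nf_unregister_queue_handler();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");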

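nf_queue_entry_get_refs() and nf_queue_entry_release_refs() are exported above so that a backend can copy a queued entry and hold its own module and device references for each copy; nfnetlink_queue needs this once the skb_gso_segment() loop removed from this file moves into that module. Below is a minimal sketch of such a copy helper, assuming the entry layout in the diff, where entry->size covers the route key that afinfo->saveroute() stores behind the struct (example_entry_dup is an invented name):

#include <linux/slab.h>
#include <net/netfilter/nf_queue.h>

/* Duplicate a queue entry, taking fresh references for the copy. */
static struct nf_queue_entry *example_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *dup;

	/* e->size = sizeof(*e) + afinfo->route_key_size, so kmemdup()
	 * also copies the af-specific route data placed after the
	 * struct itself. */
	dup = kmemdup(e, e->size, GFP_ATOMIC);
	if (!dup)
		return NULL;

	/* Fails when the owning hook's module is on its way out; the
	 * copy must be discarded in that case. */
	if (!nf_queue_entry_get_refs(dup)) {
		kfree(dup);
		return NULL;
	}
	return dup;
}

Each copy eventually goes back through nf_reinject(), which drops the references again via nf_queue_entry_release_refs().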