diff options
Diffstat (limited to 'net/netfilter/nf_queue.c')
| -rw-r--r-- | net/netfilter/nf_queue.c | 289 | 
1 file changed, 79 insertions, 210 deletions
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 74aebed5bd2..5d24b1fdb59 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -1,3 +1,8 @@ +/* + * Rusty Russell (C)2000 -- This code is GPL. + * Patrick McHardy (c) 2006-2012 + */ +  #include <linux/kernel.h>  #include <linux/slab.h>  #include <linux/init.h> @@ -14,75 +19,33 @@  #include "nf_internals.h"  /* - * A queue handler may be registered for each protocol.  Each is protected by - * long term mutex.  The handler must provide an an outfn() to accept packets - * for queueing and must reinject all packets it receives, no matter what. + * Hook for nfnetlink_queue to register its queue handler. + * We do this so that most of the NFQUEUE code can be modular. + * + * Once the queue is registered it must reinject all packets it + * receives, no matter what.   */ -static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly; - -static DEFINE_MUTEX(queue_handler_mutex); +static const struct nf_queue_handler __rcu *queue_handler __read_mostly;  /* return EBUSY when somebody else is registered, return EEXIST if the   * same handler is registered, return 0 in case of success. 
*/ -int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) +void nf_register_queue_handler(const struct nf_queue_handler *qh)  { -	int ret; - -	if (pf >= ARRAY_SIZE(queue_handler)) -		return -EINVAL; - -	mutex_lock(&queue_handler_mutex); -	if (queue_handler[pf] == qh) -		ret = -EEXIST; -	else if (queue_handler[pf]) -		ret = -EBUSY; -	else { -		rcu_assign_pointer(queue_handler[pf], qh); -		ret = 0; -	} -	mutex_unlock(&queue_handler_mutex); - -	return ret; +	/* should never happen, we only have one queueing backend in kernel */ +	WARN_ON(rcu_access_pointer(queue_handler)); +	rcu_assign_pointer(queue_handler, qh);  }  EXPORT_SYMBOL(nf_register_queue_handler);  /* The caller must flush their queue before this */ -int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) +void nf_unregister_queue_handler(void)  { -	if (pf >= ARRAY_SIZE(queue_handler)) -		return -EINVAL; - -	mutex_lock(&queue_handler_mutex); -	if (queue_handler[pf] && queue_handler[pf] != qh) { -		mutex_unlock(&queue_handler_mutex); -		return -EINVAL; -	} - -	rcu_assign_pointer(queue_handler[pf], NULL); -	mutex_unlock(&queue_handler_mutex); - +	RCU_INIT_POINTER(queue_handler, NULL);  	synchronize_rcu(); - -	return 0;  }  EXPORT_SYMBOL(nf_unregister_queue_handler); -void nf_unregister_queue_handlers(const struct nf_queue_handler *qh) -{ -	u_int8_t pf; - -	mutex_lock(&queue_handler_mutex); -	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++)  { -		if (queue_handler[pf] == qh) -			rcu_assign_pointer(queue_handler[pf], NULL); -	} -	mutex_unlock(&queue_handler_mutex); - -	synchronize_rcu(); -} -EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); - -static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) +void nf_queue_entry_release_refs(struct nf_queue_entry *entry)  {  	/* Release those devices we held, or Alexey will kill me. 
*/  	if (entry->indev) @@ -102,75 +65,87 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)  	/* Drop reference to owner of hook which queued us. */  	module_put(entry->elem->owner);  } +EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); + +/* Bump dev refs so they don't vanish while packet is out */ +bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) +{ +	if (!try_module_get(entry->elem->owner)) +		return false; + +	if (entry->indev) +		dev_hold(entry->indev); +	if (entry->outdev) +		dev_hold(entry->outdev); +#ifdef CONFIG_BRIDGE_NETFILTER +	if (entry->skb->nf_bridge) { +		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; +		struct net_device *physdev; + +		physdev = nf_bridge->physindev; +		if (physdev) +			dev_hold(physdev); +		physdev = nf_bridge->physoutdev; +		if (physdev) +			dev_hold(physdev); +	} +#endif + +	return true; +} +EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);  /*   * Any packet that leaves via this function must come back   * through nf_reinject().   */ -static int __nf_queue(struct sk_buff *skb, -		      struct list_head *elem, +int nf_queue(struct sk_buff *skb, +		      struct nf_hook_ops *elem,  		      u_int8_t pf, unsigned int hook,  		      struct net_device *indev,  		      struct net_device *outdev,  		      int (*okfn)(struct sk_buff *),  		      unsigned int queuenum)  { -	int status; +	int status = -ENOENT;  	struct nf_queue_entry *entry = NULL; -#ifdef CONFIG_BRIDGE_NETFILTER -	struct net_device *physindev; -	struct net_device *physoutdev; -#endif  	const struct nf_afinfo *afinfo;  	const struct nf_queue_handler *qh; -	/* QUEUE == DROP if noone is waiting, to be safe. */ +	/* QUEUE == DROP if no one is waiting, to be safe. 
*/  	rcu_read_lock(); -	qh = rcu_dereference(queue_handler[pf]); -	if (!qh) +	qh = rcu_dereference(queue_handler); +	if (!qh) { +		status = -ESRCH;  		goto err_unlock; +	}  	afinfo = nf_get_afinfo(pf);  	if (!afinfo)  		goto err_unlock;  	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC); -	if (!entry) +	if (!entry) { +		status = -ENOMEM;  		goto err_unlock; +	}  	*entry = (struct nf_queue_entry) {  		.skb	= skb, -		.elem	= list_entry(elem, struct nf_hook_ops, list), +		.elem	= elem,  		.pf	= pf,  		.hook	= hook,  		.indev	= indev,  		.outdev	= outdev,  		.okfn	= okfn, +		.size	= sizeof(*entry) + afinfo->route_key_size,  	}; -	/* If it's going away, ignore hook. */ -	if (!try_module_get(entry->elem->owner)) { -		rcu_read_unlock(); -		kfree(entry); -		return 0; -	} - -	/* Bump dev refs so they don't vanish while packet is out */ -	if (indev) -		dev_hold(indev); -	if (outdev) -		dev_hold(outdev); -#ifdef CONFIG_BRIDGE_NETFILTER -	if (skb->nf_bridge) { -		physindev = skb->nf_bridge->physindev; -		if (physindev) -			dev_hold(physindev); -		physoutdev = skb->nf_bridge->physoutdev; -		if (physoutdev) -			dev_hold(physoutdev); +	if (!nf_queue_entry_get_refs(entry)) { +		status = -ECANCELED; +		goto err_unlock;  	} -#endif  	skb_dst_force(skb);  	afinfo->saveroute(skb, entry);  	status = qh->outfn(entry, queuenum); @@ -182,61 +157,21 @@ static int __nf_queue(struct sk_buff *skb,  		goto err;  	} -	return 1; +	return 0;  err_unlock:  	rcu_read_unlock();  err: -	kfree_skb(skb);  	kfree(entry); -	return 1; -} - -int nf_queue(struct sk_buff *skb, -	     struct list_head *elem, -	     u_int8_t pf, unsigned int hook, -	     struct net_device *indev, -	     struct net_device *outdev, -	     int (*okfn)(struct sk_buff *), -	     unsigned int queuenum) -{ -	struct sk_buff *segs; - -	if (!skb_is_gso(skb)) -		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn, -				  queuenum); - -	switch (pf) { -	case NFPROTO_IPV4: -		skb->protocol = htons(ETH_P_IP); -		
break; -	case NFPROTO_IPV6: -		skb->protocol = htons(ETH_P_IPV6); -		break; -	} - -	segs = skb_gso_segment(skb, 0); -	kfree_skb(skb); -	if (IS_ERR(segs)) -		return 1; - -	do { -		struct sk_buff *nskb = segs->next; - -		segs->next = NULL; -		if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn, -				queuenum)) -			kfree_skb(segs); -		segs = nskb; -	} while (segs); -	return 1; +	return status;  }  void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)  {  	struct sk_buff *skb = entry->skb; -	struct list_head *elem = &entry->elem->list; +	struct nf_hook_ops *elem = entry->elem;  	const struct nf_afinfo *afinfo; +	int err;  	rcu_read_lock(); @@ -244,7 +179,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)  	/* Continue traversal iff userspace said ok... */  	if (verdict == NF_REPEAT) { -		elem = elem->prev; +		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);  		verdict = NF_ACCEPT;  	} @@ -270,12 +205,20 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)  		local_bh_enable();  		break;  	case NF_QUEUE: -		if (!__nf_queue(skb, elem, entry->pf, entry->hook, +		err = nf_queue(skb, elem, entry->pf, entry->hook,  				entry->indev, entry->outdev, entry->okfn, -				verdict >> NF_VERDICT_BITS)) -			goto next_hook; +				verdict >> NF_VERDICT_QBITS); +		if (err < 0) { +			if (err == -ECANCELED) +				goto next_hook; +			if (err == -ESRCH && +			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) +				goto next_hook; +			kfree_skb(skb); +		}  		break;  	case NF_STOLEN: +		break;  	default:  		kfree_skb(skb);  	} @@ -283,77 +226,3 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)  	kfree(entry);  }  EXPORT_SYMBOL(nf_reinject); - -#ifdef CONFIG_PROC_FS -static void *seq_start(struct seq_file *seq, loff_t *pos) -{ -	if (*pos >= ARRAY_SIZE(queue_handler)) -		return NULL; - -	return pos; -} - -static void *seq_next(struct seq_file *s, void *v, loff_t *pos) -{ -	(*pos)++; - -	if (*pos >= 
ARRAY_SIZE(queue_handler)) -		return NULL; - -	return pos; -} - -static void seq_stop(struct seq_file *s, void *v) -{ - -} - -static int seq_show(struct seq_file *s, void *v) -{ -	int ret; -	loff_t *pos = v; -	const struct nf_queue_handler *qh; - -	rcu_read_lock(); -	qh = rcu_dereference(queue_handler[*pos]); -	if (!qh) -		ret = seq_printf(s, "%2lld NONE\n", *pos); -	else -		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name); -	rcu_read_unlock(); - -	return ret; -} - -static const struct seq_operations nfqueue_seq_ops = { -	.start	= seq_start, -	.next	= seq_next, -	.stop	= seq_stop, -	.show	= seq_show, -}; - -static int nfqueue_open(struct inode *inode, struct file *file) -{ -	return seq_open(file, &nfqueue_seq_ops); -} - -static const struct file_operations nfqueue_file_ops = { -	.owner	 = THIS_MODULE, -	.open	 = nfqueue_open, -	.read	 = seq_read, -	.llseek	 = seq_lseek, -	.release = seq_release, -}; -#endif /* PROC_FS */ - - -int __init netfilter_queue_init(void) -{ -#ifdef CONFIG_PROC_FS -	if (!proc_create("nf_queue", S_IRUGO, -			 proc_net_netfilter, &nfqueue_file_ops)) -		return -1; -#endif -	return 0; -} -  | 
