/*
 * This is a module which is used for logging packets to userspace via
 * nfnetlink.
*
* (C) 2005 by Harald Welte <laforge@netfilter.org>
*
* Based on the old ipv4-only ipt_ULOG.c:
* (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 2006-01-26 Harald Welte <laforge@netfilter.org>
* - Add optional local and global sequence number to detect lost
* events from userspace
*
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <asm/atomic.h>
#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif
#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
/* Rate-limited printk.  Note: no trailing semicolon after "while (0)" --
 * the whole point of the do/while(0) wrapper is that the macro behaves
 * like a single statement, so "if (c) PRINTR(...); else ..." works.
 * The original stray ';' silently broke that guarantee. */
#define PRINTR(x, args...) do { if (net_ratelimit()) \
				     printk(x, ## args); } while (0)
/* Verbose per-call debugging; compiled out by default.  Flip "#if 0"
 * to "#if 1" to get file/line/function-prefixed KERN_DEBUG output. */
#if 0
#define UDEBUG(x, args ...) printk(KERN_DEBUG "%s(%d):%s(): " x, \
				   __FILE__, __LINE__, __FUNCTION__, \
				   ## args)
#else
#define UDEBUG(x, ...)
#endif
/* Per-group logging instance.  Lifetime is reference-counted via 'use'
 * (see instance_get()/instance_put(): the final put kfree()s it);
 * membership in the global instance_table hash is guarded by
 * instances_lock. */
struct nfulnl_instance {
	struct hlist_node hlist;	/* global list of instances */
	spinlock_t lock;		/* per-instance lock; NOTE(review): exact
					 * coverage not visible in this chunk */
	atomic_t use;			/* use count */

	unsigned int qlen;		/* number of nlmsgs in skb */
	struct sk_buff *skb;		/* pre-allocated skb */
	struct nlmsghdr *lastnlh;	/* netlink header of last msg in skb */
	struct timer_list timer;	/* flush timer (nfulnl_timer) */
	int peer_pid;			/* PID of the peer process */

	/* configurable parameters */
	unsigned int flushtimeout;	/* timeout until queue flush */
	unsigned int nlbufsiz;		/* netlink buffer allocation size */
	unsigned int qthreshold;	/* threshold of the queue */
	u_int32_t copy_range;
	u_int32_t seq;			/* instance-local sequential counter */
	u_int16_t group_num;		/* number of this queue */
	u_int16_t flags;
	u_int8_t copy_mode;
};
static DEFINE_RWLOCK(instances_lock);
static atomic_t global_seq;
#define INSTANCE_BUCKETS 16
static struct hlist_head instance_table[INSTANCE_BUCKETS];
static unsigned int hash_init;
/* Map a group number onto one of the INSTANCE_BUCKETS hash chains. */
static inline u_int8_t
instance_hashfn(u_int16_t group_num)
{
	u_int8_t low_byte = group_num & 0xff;

	return low_byte % INSTANCE_BUCKETS;
}
/* Walk the hash chain for 'group_num' and return the matching instance,
 * or NULL if none exists.  Does not take a reference; callers hold
 * instances_lock (see instance_lookup_get() and instance_create()). */
static struct nfulnl_instance *
__instance_lookup(u_int16_t group_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfulnl_instance *inst;

	UDEBUG("entering (group_num=%u)\n", group_num);

	head = &instance_table[instance_hashfn(group_num)];
	hlist_for_each(pos, head) {
		inst = hlist_entry(pos, struct nfulnl_instance, hlist);
		if (inst->group_num == group_num)
			return inst;
	}

	return NULL;
}
/* Take an additional reference on 'inst'; paired with instance_put(). */
static inline void
instance_get(struct nfulnl_instance *inst)
{
	atomic_inc(&inst->use);
}
/* Look up the instance for 'group_num' and, if found, take a reference
 * on it under instances_lock so it cannot be freed from under the
 * caller.  Returns NULL if no such instance exists; the caller must
 * drop the reference with instance_put(). */
static struct nfulnl_instance *
instance_lookup_get(u_int16_t group_num)
{
	struct nfulnl_instance *found;

	read_lock_bh(&instances_lock);
	found = __instance_lookup(group_num);
	if (found != NULL)
		instance_get(found);
	read_unlock_bh(&instances_lock);

	return found;
}
/* Drop one reference on 'inst' (NULL is tolerated); frees the instance
 * when the last reference goes away. */
static void
instance_put(struct nfulnl_instance *inst)
{
	if (!inst)
		return;

	if (atomic_dec_and_test(&inst->use)) {
		UDEBUG("kfree(inst=%p)\n", inst);
		kfree(inst);
	}
}
static void nfulnl_timer(unsigned long data);
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
struct nfulnl_instance *inst;
UDEBUG("entering (group_num=%u, pid=%d)\n", group_num,
pid);
write_lock_bh(&instances_lock);
if (__instance_lookup(group_num)) {
inst = NULL;
UDEBUG("aborting, instance already exists\n");
goto out_unlock;
}
inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
if (!inst)
goto out_unlock;
INIT_HLIST_NODE(&inst->hlist);
spin_lock_init(&inst->lock);
/* needs to be two, since we _put() after creation */
atomic_set(&inst->use, 2);
init_timer(&inst->timer);
inst->timer.function = nfulnl_timer;
inst->