Diffstat (limited to 'net/xfrm/xfrm_policy.c')
-rw-r--r--  net/xfrm/xfrm_policy.c  | 2330
1 file changed, 1439 insertions(+), 891 deletions(-)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9fc4c315f6c..0525d78ba32 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,6 +26,7 @@
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
@@ -34,58 +35,52 @@
#include "xfrm_hash.h"
-int sysctl_xfrm_larval_drop __read_mostly;
+#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
+#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
+#define XFRM_MAX_QUEUE_LEN 100
-#ifdef CONFIG_XFRM_STATISTICS
-DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
-EXPORT_SYMBOL(xfrm_statistics);
-#endif
-
-DEFINE_MUTEX(xfrm_cfg_mutex);
-EXPORT_SYMBOL(xfrm_cfg_mutex);
-
-static DEFINE_RWLOCK(xfrm_policy_lock);
-
-unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
-EXPORT_SYMBOL(xfrm_policy_count);
-
-static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
-static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
+static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+ __read_mostly;
static struct kmem_cache *xfrm_dst_cache __read_mostly;
-static struct work_struct xfrm_policy_gc_work;
-static HLIST_HEAD(xfrm_policy_gc_list);
-static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
-
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
+static int stale_bundle(struct dst_entry *dst);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
+static void xfrm_policy_queue_process(unsigned long arg);
-static inline int
-__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
+ int dir);
+
+static inline bool
+__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
- return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
- addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
- !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
- !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
- (fl->proto == sel->proto || !sel->proto) &&
- (fl->oif == sel->ifindex || !sel->ifindex);
+ const struct flowi4 *fl4 = &fl->u.ip4;
+
+ return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
+ addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
+ !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
+ !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
+ (fl4->flowi4_proto == sel->proto || !sel->proto) &&
+ (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
-static inline int
-__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+static inline bool
+__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
- return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
- addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
- !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
- !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
- (fl->proto == sel->proto || !sel->proto) &&
- (fl->oif == sel->ifindex || !sel->ifindex);
+ const struct flowi6 *fl6 = &fl->u.ip6;
+
+ return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
+ addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
+ !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
+ !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
+ (fl6->flowi6_proto == sel->proto || !sel->proto) &&
+ (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
-int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
- unsigned short family)
+bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+ unsigned short family)
{
switch (family) {
case AF_INET:
@@ -93,28 +88,74 @@ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
case AF_INET6:
return __xfrm6_selector_match(sel, fl);
}
- return 0;
+ return false;
+}
+
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+{
+ struct xfrm_policy_afinfo *afinfo;
+
+ if (unlikely(family >= NPROTO))
+ return NULL;
+ rcu_read_lock();
+ afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
+ if (unlikely(!afinfo))
+ rcu_read_unlock();
+ return afinfo;
+}
+
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
+{
+ rcu_read_unlock();
+}
+
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ int family)
+{
+ struct xfrm_policy_afinfo *afinfo;
+ struct dst_entry *dst;
+
+ afinfo = xfrm_policy_get_afinfo(family);
+ if (unlikely(afinfo == NULL))
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+
+ xfrm_policy_put_afinfo(afinfo);
+
+ return dst;
}
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+ xfrm_address_t *prev_saddr,
+ xfrm_address_t *prev_daddr,
int family)
{
+ struct net *net = xs_net(x);
xfrm_address_t *saddr = &x->props.saddr;
xfrm_address_t *daddr = &x->id.daddr;
- struct xfrm_policy_afinfo *afinfo;
struct dst_entry *dst;
- if (x->type->flags & XFRM_TYPE_LOCAL_COADDR)
+ if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
saddr = x->coaddr;
- if (x->type->flags & XFRM_TYPE_REMOTE_COADDR)
+ daddr = prev_daddr;
+ }
+ if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
+ saddr = prev_saddr;
daddr = x->coaddr;
+ }
- afinfo = xfrm_policy_get_afinfo(family);
- if (unlikely(afinfo == NULL))
- return ERR_PTR(-EAFNOSUPPORT);
+ dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+
+ if (!IS_ERR(dst)) {
+ if (prev_saddr != saddr)
+ memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
+ if (prev_daddr != daddr)
+ memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
+ }
- dst = afinfo->dst_lookup(tos, saddr, daddr);
- xfrm_policy_put_afinfo(afinfo);
return dst;
}
@@ -128,7 +169,7 @@ static inline unsigned long make_jiffies(long secs)
static void xfrm_policy_timer(unsigned long data)
{
- struct xfrm_policy *xp = (struct xfrm_policy*)data;
+ struct xfrm_policy *xp = (struct xfrm_policy *)data;
unsigned long now = get_seconds();
long next = LONG_MAX;
int warn = 0;
@@ -136,7 +177,7 @@ static void xfrm_policy_timer(unsigned long data)
read_lock(&xp->lock);
- if (xp->dead)
+ if (unlikely(xp->walk.dead))
goto out;
dir = xfrm_policy_id2dir(xp->index);
@@ -196,24 +237,59 @@ expired:
xfrm_pol_put(xp);
}
+static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
+{
+ struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+ if (unlikely(pol->walk.dead))
+ flo = NULL;
+ else
+ xfrm_pol_hold(pol);
+
+ return flo;
+}
+
+static int xfrm_policy_flo_check(struct flow_cache_object *flo)
+{
+ struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+ return !pol->walk.dead;
+}
+
+static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
+{
+ xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
+}
+
+static const struct flow_cache_ops xfrm_policy_fc_ops = {
+ .get = xfrm_policy_flo_get,
+ .check = xfrm_policy_flo_check,
+ .delete = xfrm_policy_flo_delete,
+};
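
The three callbacks above are how a policy plugs into the generic flow cache: ->get hands out a reference (or NULL for a dead policy, telling the cache to drop the entry), ->check reports whether a cached entry is still usable, and ->delete releases the cache's reference; the same pattern is reused further down for bundles (xfrm_bundle_fc_ops). Below is a minimal userspace model of that get/check/delete contract, not the kernel's flow cache; every name in it (model_flo, model_cache_hit, ...) is made up for illustration.

/* Minimal userspace model of the get/check/delete contract behind
 * xfrm_policy_fc_ops.  All names here are illustrative; this is not
 * the kernel flow cache. */
#include <stdio.h>

struct model_flo;
struct model_flo_ops {
	struct model_flo *(*get)(struct model_flo *flo);	/* take a ref, or NULL if dead */
	int (*check)(struct model_flo *flo);			/* still valid? */
	void (*delete)(struct model_flo *flo);			/* drop the cache's ref */
};
struct model_flo {
	const struct model_flo_ops *ops;
	int refcnt;
	int dead;		/* plays the role of pol->walk.dead */
};

static struct model_flo *model_get(struct model_flo *flo)
{
	if (flo->dead)
		return NULL;	/* cache will discard the stale entry */
	flo->refcnt++;
	return flo;
}

static int model_check(struct model_flo *flo)
{
	return !flo->dead;
}

static void model_delete(struct model_flo *flo)
{
	flo->refcnt--;
}

static const struct model_flo_ops model_ops = {
	.get	= model_get,
	.check	= model_check,
	.delete	= model_delete,
};

/* What a cache hit looks like from the cache's side: validate the
 * entry, then hand the caller its own reference. */
static struct model_flo *model_cache_hit(struct model_flo *cached)
{
	if (!cached->ops->check(cached))
		return NULL;
	return cached->ops->get(cached);
}

int main(void)
{
	struct model_flo pol = { .ops = &model_ops, .refcnt = 1, .dead = 0 };

	if (model_cache_hit(&pol))
		printf("live policy: hit, refcnt now %d\n", pol.refcnt);

	pol.dead = 1;			/* xfrm_policy_kill() sets walk.dead */
	if (!model_cache_hit(&pol))
		printf("dead policy: cache entry rejected\n");
	return 0;
}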
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
* SPD calls.
*/
-struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
+struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
struct xfrm_policy *policy;
policy = kzalloc(sizeof(struct xfrm_policy), gfp);
if (policy) {
+ write_pnet(&policy->xp_net, net);
+ INIT_LIST_HEAD(&policy->walk.all);
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
atomic_set(&policy->refcnt, 1);
+ skb_queue_head_init(&policy->polq.hold_queue);
setup_timer(&policy->timer, xfrm_policy_timer,
(unsigned long)policy);
+ setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
+ (unsigned long)policy);
+ policy->flo.ops = &xfrm_policy_fc_ops;
}
return policy;
}
@@ -223,49 +299,22 @@ EXPORT_SYMBOL(xfrm_policy_alloc);
void xfrm_policy_destroy(struct xfrm_policy *policy)
{
- BUG_ON(!policy->dead);
+ BUG_ON(!policy->walk.dead);
- BUG_ON(policy->bundles);
-
- if (del_timer(&policy->timer))
+ if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
- security_xfrm_policy_free(policy);
+ security_xfrm_policy_free(policy->security);
kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
-static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- while ((dst = policy->bundles) != NULL) {
- policy->bundles = dst->next;
- dst_free(dst);
- }
-
- if (del_timer(&policy->timer))
- atomic_dec(&policy->refcnt);
-
- if (atomic_read(&policy->refcnt) > 1)
- flow_cache_flush();
-
- xfrm_pol_put(policy);
-}
-
-static void xfrm_policy_gc_task(struct work_struct *work)
+static void xfrm_queue_purge(struct sk_buff_head *list)
{
- struct xfrm_policy *policy;
- struct hlist_node *entry, *tmp;
- struct hlist_head gc_list;
-
- spin_lock_bh(&xfrm_policy_gc_lock);
- gc_list.first = xfrm_policy_gc_list.first;
- INIT_HLIST_HEAD(&xfrm_policy_gc_list);
- spin_unlock_bh(&xfrm_policy_gc_lock);
+ struct sk_buff *skb;
- hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
- xfrm_policy_gc_kill(policy);
+ while ((skb = skb_dequeue(list)) != NULL)
+ kfree_skb(skb);
}
/* Rule must be locked. Release descendant resources, announce
@@ -274,84 +323,75 @@ static void xfrm_policy_gc_task(struct work_struct *work)
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
- int dead;
+ policy->walk.dead = 1;
- write_lock_bh(&policy->lock);
- dead = policy->dead;
- policy->dead = 1;
- write_unlock_bh(&policy->lock);
+ atomic_inc(&policy->genid);
- if (unlikely(dead)) {
- WARN_ON(1);
- return;
- }
+ if (del_timer(&policy->polq.hold_timer))
+ xfrm_pol_put(policy);
+ xfrm_queue_purge(&policy->polq.hold_queue);
- spin_lock(&xfrm_policy_gc_lock);
- hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
- spin_unlock(&xfrm_policy_gc_lock);
+ if (del_timer(&policy->timer))
+ xfrm_pol_put(policy);
- schedule_work(&xfrm_policy_gc_work);
+ xfrm_pol_put(policy);
}
-struct xfrm_policy_hash {
- struct hlist_head *table;
- unsigned int hmask;
-};
-
-static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
-static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
-static struct hlist_head *xfrm_policy_byidx __read_mostly;
-static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
-static inline unsigned int idx_hash(u32 index)
+static inline unsigned int idx_hash(struct net *net, u32 index)
{
- return __idx_hash(index, xfrm_idx_hmask);
+ return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
-static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
+static struct hlist_head *policy_hash_bysel(struct net *net,
+ const struct xfrm_selector *sel,
+ unsigned short family, int dir)
{
- unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+ unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash = __sel_hash(sel, family, hmask);
return (hash == hmask + 1 ?
- &xfrm_policy_inexact[dir] :
- xfrm_policy_bydst[dir].table + hash);
+ &net->xfrm.policy_inexact[dir] :
+ net->xfrm.policy_bydst[dir].table + hash);
}
-static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
+static struct hlist_head *policy_hash_direct(struct net *net,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ unsigned short family, int dir)
{
- unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+ unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
- return xfrm_policy_bydst[dir].table + hash;
+ return net->xfrm.policy_bydst[dir].table + hash;
}
static void xfrm_dst_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
unsigned int nhashmask)
{
- struct hlist_node *entry, *tmp, *entry0 = NULL;
+ struct hlist_node *tmp, *entry0 = NULL;
struct xfrm_policy *pol;
unsigned int h0 = 0;
redo:
- hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
+ hlist_for_each_entry_safe(pol, tmp, list, bydst) {
unsigned int h;
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask);
if (!entry0) {
- hlist_del(entry);
+ hlist_del(&pol->bydst);
hlist_add_head(&pol->bydst, ndsttable+h);
h0 = h;
} else {
if (h != h0)
continue;
- hlist_del(entry);
+ hlist_del(&pol->bydst);
hlist_add_after(entry0, &pol->bydst);
}
- entry0 = entry;
+ entry0 = &pol->bydst;
}
if (!hlist_empty(list)) {
entry0 = NULL;
@@ -363,10 +403,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list,
struct hlist_head *nidxtable,
unsigned int nhashmask)
{
- struct hlist_node *entry, *tmp;
+ struct hlist_node *tmp;
struct xfrm_policy *pol;
- hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
+ hlist_for_each_entry_safe(pol, tmp, list, byidx) {
unsigned int h;
h = __idx_hash(pol->index, nhashmask);
@@ -379,60 +419,60 @@ static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
return ((old_hmask + 1) << 1) - 1;
}
-static void xfrm_bydst_resize(int dir)
+static void xfrm_bydst_resize(struct net *net, int dir)
{
- unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+ unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
- struct hlist_head *odst = xfrm_policy_bydst[dir].table;
+ struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
struct hlist_head *ndst = xfrm_hash_alloc(nsize);
int i;
if (!ndst)
return;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
- xfrm_policy_bydst[dir].table = ndst;
- xfrm_policy_bydst[dir].hmask = nhashmask;
+ net->xfrm.policy_bydst[dir].table = ndst;
+ net->xfrm.policy_bydst[dir].hmask = nhashmask;
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
-static void xfrm_byidx_resize(int total)
+static void xfrm_byidx_resize(struct net *net, int total)
{
- unsigned int hmask = xfrm_idx_hmask;
+ unsigned int hmask = net->xfrm.policy_idx_hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
- struct hlist_head *oidx = xfrm_policy_byidx;
+ struct hlist_head *oidx = net->xfrm.policy_byidx;
struct hlist_head *nidx = xfrm_hash_alloc(nsize);
int i;
if (!nidx)
return;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
- xfrm_policy_byidx = nidx;
- xfrm_idx_hmask = nhashmask;
+ net->xfrm.policy_byidx = nidx;
+ net->xfrm.policy_idx_hmask = nhashmask;
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
-static inline int xfrm_bydst_should_resize(int dir, int *total)
+static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
- unsigned int cnt = xfrm_policy_count[dir];
- unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+ unsigned int cnt = net->xfrm.policy_count[dir];
+ unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
if (total)
*total += cnt;
@@ -444,9 +484,9 @@ static inline int xfrm_bydst_should_resize(int dir, int *total)
return 0;
}
-static inline int xfrm_byidx_should_resize(int total)
+static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
- unsigned int hmask = xfrm_idx_hmask;
+ unsigned int hmask = net->xfrm.policy_idx_hmask;
if ((hmask + 1) < xfrm_policy_hashmax &&
total > hmask)
@@ -455,61 +495,65 @@ static inline int xfrm_byidx_should_resize(int total)
return 0;
}
-void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
- read_lock_bh(&xfrm_policy_lock);
- si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
- si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
- si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
- si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
- si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
- si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
- si->spdhcnt = xfrm_idx_hmask;
+ read_lock_bh(&net->xfrm.xfrm_policy_lock);
+ si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
+ si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
+ si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
+ si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
+ si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
+ si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
+ si->spdhcnt = net->xfrm.policy_idx_hmask;
si->spdhmcnt = xfrm_policy_hashmax;
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(struct work_struct *__unused)
+static void xfrm_hash_resize(struct work_struct *work)
{
+ struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
int dir, total;
mutex_lock(&hash_resize_mutex);
total = 0;
for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
- if (xfrm_bydst_should_resize(dir, &total))
- xfrm_bydst_resize(dir);
+ if (xfrm_bydst_should_resize(net, dir, &total))
+ xfrm_bydst_resize(net, dir);
}
- if (xfrm_byidx_should_resize(total))
- xfrm_byidx_resize(total);
+ if (xfrm_byidx_should_resize(net, total))
+ xfrm_byidx_resize(net, total);
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
-
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
-static u32 xfrm_gen_index(u8 type, int dir)
+static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
static u32 idx_generator;
for (;;) {
- struct hlist_node *entry;
struct hlist_head *list;
struct xfrm_policy *p;
u32 idx;
int found;
- idx = (idx_generator | dir);
- idx_generator += 8;
+ if (!index) {
+ idx = (idx_generator | dir);
+ idx_generator += 8;
+ } else {
+ idx = index;
+ index = 0;
+ }
+
if (idx == 0)
idx = 8;
- list = xfrm_policy_byidx + idx_hash(idx);
+ list = net->xfrm.policy_byidx + idx_hash(net, idx);
found = 0;
- hlist_for_each_entry(p, entry, list, byidx) {
+ hlist_for_each_entry(p, list, byidx) {
if (p->index == idx) {
found = 1;
break;
@@ -535,25 +579,68 @@ static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s
return 0;
}
+static void xfrm_policy_requeue(struct xfrm_policy *old,
+ struct xfrm_policy *new)
+{
+ struct xfrm_policy_queue *pq = &old->polq;
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+
+ spin_lock_bh(&pq->hold_queue.lock);
+ skb_queue_splice_init(&pq->hold_queue, &list);
+ if (del_timer(&pq->hold_timer))
+ xfrm_pol_put(old);
+ spin_unlock_bh(&pq->hold_queue.lock);
+
+ if (skb_queue_empty(&list))
+ return;
+
+ pq = &new->polq;
+
+ spin_lock_bh(&pq->hold_queue.lock);
+ skb_queue_splice(&list, &pq->hold_queue);
+ pq->timeout = XFRM_QUEUE_TMO_MIN;
+ if (!mod_timer(&pq->hold_timer, jiffies))
+ xfrm_pol_hold(new);
+ spin_unlock_bh(&pq->hold_queue.lock);
+}
+
+static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
+ struct xfrm_policy *pol)
+{
+ u32 mark = policy->mark.v & policy->mark.m;
+
+ if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
+ return true;
+
+ if ((mark & pol->mark.m) == pol->mark.v &&
+ policy->priority == pol->priority)
+ return true;
+
+ return false;
+}
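
xfrm_policy_mark_match() above decides whether a policy being inserted may replace an existing one: either the value/mask pair is identical, or the new policy's effective mark lands inside the old policy's mask and the priorities agree. The lookup paths later in this file use the same masked test, (mark & pol->mark.m) == pol->mark.v, against fl->flowi_mark. A standalone sketch of that test with made-up mark values:

/* Standalone illustration of the masked mark test used in this file;
 * the mark/mask values below are arbitrary examples. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct mark_model {
	uint32_t v;	/* value, like pol->mark.v */
	uint32_t m;	/* mask,  like pol->mark.m */
};

static bool mark_matches(uint32_t mark, const struct mark_model *pm)
{
	/* same shape as "(mark & pol->mark.m) == pol->mark.v" */
	return (mark & pm->m) == pm->v;
}

int main(void)
{
	struct mark_model pol = { .v = 0x10, .m = 0xf0 };

	printf("mark 0x13: %s\n", mark_matches(0x13, &pol) ? "match" : "no match");
	printf("mark 0x23: %s\n", mark_matches(0x23, &pol) ? "match" : "no match");
	return 0;
}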
+
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
+ struct net *net = xp_net(policy);
struct xfrm_policy *pol;
struct xfrm_policy *delpol;
struct hlist_head *chain;
- struct hlist_node *entry, *newpos;
- struct dst_entry *gc_list;
+ struct hlist_node *newpos;
- write_lock_bh(&xfrm_policy_lock);
- chain = policy_hash_bysel(&policy->selector, policy->family, dir);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == policy->type &&
!selector_cmp(&pol->selector, &policy->selector) &&
+ xfrm_policy_mark_match(policy, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return -EEXIST;
}
delpol = pol;
@@ -571,176 +658,146 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
else
hlist_add_head(&policy->bydst, chain);
xfrm_pol_hold(policy);
- xfrm_policy_count[dir]++;
- atomic_inc(&flow_cache_genid);
+ net->xfrm.policy_count[dir]++;
+ atomic_inc(&net->xfrm.flow_cache_genid);
+
+ /* After previous checking, family can either be AF_INET or AF_INET6 */
+ if (policy->family == AF_INET)
+ rt_genid_bump_ipv4(net);
+ else
+ rt_genid_bump_ipv6(net);
+
if (delpol) {
- hlist_del(&delpol->bydst);
- hlist_del(&delpol->byidx);
- xfrm_policy_count[dir]--;
+ xfrm_policy_requeue(delpol, policy);
+ __xfrm_policy_unlink(delpol, dir);
}
- policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
- hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
+ policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
+ hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
policy->curlft.add_time = get_seconds();
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
- write_unlock_bh(&xfrm_policy_lock);
+ list_add(&policy->walk.all, &net->xfrm.policy_all);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
xfrm_policy_kill(delpol);
- else if (xfrm_bydst_should_resize(dir, NULL))
- schedule_work(&xfrm_hash_work);
-
- read_lock_bh(&xfrm_policy_lock);
- gc_list = NULL;
- entry = &policy->bydst;
- hlist_for_each_entry_continue(policy, entry, bydst) {
- struct dst_entry *dst;
-
- write_lock(&policy->lock);
- dst = policy->bundles;
- if (dst) {
- struct dst_entry *tail = dst;
- while (tail->next)
- tail = tail->next;
- tail->next = gc_list;
- gc_list = dst;
-
- policy->bundles = NULL;
- }
- write_unlock(&policy->lock);
- }
- read_unlock_bh(&xfrm_policy_lock);
-
- while (gc_list) {
- struct dst_entry *dst = gc_list;
-
- gc_list = dst->next;
- dst_free(dst);
- }
+ else if (xfrm_bydst_should_resize(net, dir, NULL))
+ schedule_work(&net->xfrm.policy_hash_work);
return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
-struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
- struct xfrm_selector *sel,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
+ int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
- struct hlist_node *entry;
*err = 0;
- write_lock_bh(&xfrm_policy_lock);
- chain = policy_hash_bysel(sel, sel->family, dir);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ chain = policy_hash_bysel(net, sel, sel->family, dir);
ret = NULL;
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == type &&
+ (mark & pol->mark.m) == pol->mark.v &&
!selector_cmp(sel, &pol->selector) &&
xfrm_sec_ctx_match(ctx, pol->security)) {
xfrm_pol_hold(pol);
if (delete) {
- *err = security_xfrm_policy_delete(pol);
+ *err = security_xfrm_policy_delete(
+ pol->security);
if (*err) {
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
- hlist_del(&pol->bydst);
- hlist_del(&pol->byidx);
- xfrm_policy_count[dir]--;
+ __xfrm_policy_unlink(pol, dir);
}
ret = pol;
break;
}
}
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
- if (ret && delete) {
- atomic_inc(&flow_cache_genid);
+ if (ret && delete)
xfrm_policy_kill(ret);
- }
return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
-struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
- int *err)
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
+ int dir, u32 id, int delete, int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
- struct hlist_node *entry;
*err = -ENOENT;
if (xfrm_policy_id2dir(id) != dir)
return NULL;
*err = 0;
- write_lock_bh(&xfrm_policy_lock);
- chain = xfrm_policy_byidx + idx_hash(id);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
- hlist_for_each_entry(pol, entry, chain, byidx) {
- if (pol->type == type && pol->index == id) {
+ hlist_for_each_entry(pol, chain, byidx) {
+ if (pol->type == type && pol->index == id &&
+ (mark & pol->mark.m) == pol->mark.v) {
xfrm_pol_hold(pol);
if (delete) {
- *err = security_xfrm_policy_delete(pol);
+ *err = security_xfrm_policy_delete(
+ pol->security);
if (*err) {
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
- hlist_del(&pol->bydst);
- hlist_del(&pol->byidx);
- xfrm_policy_count[dir]--;
+ __xfrm_policy_unlink(pol, dir);
}
ret = pol;
break;
}
}
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
- if (ret && delete) {
- atomic_inc(&flow_cache_genid);
+ if (ret && delete)
xfrm_policy_kill(ret);
- }
return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
-xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
+xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
int dir, err = 0;
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy *pol;
- struct hlist_node *entry;
int i;
- hlist_for_each_entry(pol, entry,
- &xfrm_policy_inexact[dir], bydst) {
+ hlist_for_each_entry(pol,
+ &net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
- err = security_xfrm_policy_delete(pol);
+ err = security_xfrm_policy_delete(pol->security);
if (err) {
- xfrm_audit_policy_delete(pol, 0,
- audit_info->loginuid,
- audit_info->secid);
+ xfrm_audit_policy_delete(pol, 0, task_valid);
return err;
}
}
- for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol, entry,
- xfrm_policy_bydst[dir].table + i,
+ for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
+ hlist_for_each_entry(pol,
+ net->xfrm.policy_bydst[dir].table + i,
bydst) {
if (pol->type != type)
continue;
- err = security_xfrm_policy_delete(pol);
+ err = security_xfrm_policy_delete(
+ pol->security);
if (err) {
xfrm_audit_policy_delete(pol, 0,
- audit_info->loginuid,
- audit_info->secid);
+ task_valid);
return err;
}
}
@@ -750,162 +807,170 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
}
#else
static inline int
-xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
+xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
return 0;
}
#endif
-int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
+int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
- int dir, err = 0;
+ int dir, err = 0, cnt = 0;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
- err = xfrm_policy_flush_secctx_check(type, audit_info);
+ err = xfrm_policy_flush_secctx_check(net, type, task_valid);
if (err)
goto out;
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy *pol;
- struct hlist_node *entry;
- int i, killed;
+ int i;
- killed = 0;
again1:
- hlist_for_each_entry(pol, entry,
- &xfrm_policy_inexact[dir], bydst) {
+ hlist_for_each_entry(pol,
+ &net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
- hlist_del(&pol->bydst);
- hlist_del(&pol->byidx);
- write_unlock_bh(&xfrm_policy_lock);
+ __xfrm_policy_unlink(pol, dir);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ cnt++;
- xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
- audit_info->secid);
+ xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
- killed++;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again1;
}
- for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
+ for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
again2:
- hlist_for_each_entry(pol, entry,
- xfrm_policy_bydst[dir].table + i,
+ hlist_for_each_entry(pol,
+ net->xfrm.policy_bydst[dir].table + i,
bydst) {
if (pol->type != type)
continue;
- hlist_del(&pol->bydst);
- hlist_del(&pol->byidx);
- write_unlock_bh(&xfrm_policy_lock);
+ __xfrm_policy_unlink(pol, dir);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ cnt++;
- xfrm_audit_policy_delete(pol, 1,
- audit_info->loginuid,
- audit_info->secid);
+ xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
- killed++;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again2;
}
}
- xfrm_policy_count[dir] -= killed;
}
- atomic_inc(&flow_cache_genid);
+ if (!cnt)
+ err = -ESRCH;
out:
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
-int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+ int (*func)(struct xfrm_policy *, int, int, void*),
void *data)
{
- struct xfrm_policy *pol, *last = NULL;
- struct hlist_node *entry;
- int dir, last_dir = 0, count, error;
+ struct xfrm_policy *pol;
+ struct xfrm_policy_walk_entry *x;
+ int error = 0;
- read_lock_bh(&xfrm_policy_lock);
- count = 0;
+ if (walk->type >= XFRM_POLICY_TYPE_MAX &&
+ walk->type != XFRM_POLICY_TYPE_ANY)
+ return -EINVAL;
- for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
- struct hlist_head *table = xfrm_policy_bydst[dir].table;
- int i;
+ if (list_empty(&walk->walk.all) && walk->seq != 0)
+ return 0;
- hlist_for_each_entry(pol, entry,
- &xfrm_policy_inexact[dir], bydst) {
- if (pol->type != type)
- continue;
- if (last) {
- error = func(last, last_dir % XFRM_POLICY_MAX,
- count, data);
- if (error)
- goto out;
- }
- last = pol;
- last_dir = dir;
- count++;
- }
- for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol, entry, table + i, bydst) {
- if (pol->type != type)
- continue;
- if (last) {
- error = func(last, last_dir % XFRM_POLICY_MAX,
- count, data);
- if (error)
- goto out;
- }
- last = pol;
- last_dir = dir;
- count++;
- }
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ if (list_empty(&walk->walk.all))
+ x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
+ else
+ x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
+ list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
+ if (x->dead)
+ continue;
+ pol = container_of(x, struct xfrm_policy, walk);
+ if (walk->type != XFRM_POLICY_TYPE_ANY &&
+ walk->type != pol->type)
+ continue;
+ error = func(pol, xfrm_policy_id2dir(pol->index),
+ walk->seq, data);
+ if (error) {
+ list_move_tail(&walk->walk.all, &x->all);
+ goto out;
}
+ walk->seq++;
}
- if (count == 0) {
+ if (walk->seq == 0) {
error = -ENOENT;
goto out;
}
- error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
+ list_del_init(&walk->walk.all);
out:
- read_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
+{
+ INIT_LIST_HEAD(&walk->walk.all);
+ walk->walk.dead = 1;
+ walk->type = type;
+ walk->seq = 0;
+}
+EXPORT_SYMBOL(xfrm_policy_walk_init);
+
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
+{
+ if (list_empty(&walk->walk.all))
+ return;
+
+ write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
+ list_del(&walk->walk.all);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+}
+EXPORT_SYMBOL(xfrm_policy_walk_done);
+
/*
* Find policy to apply to this flow.
*
* Returns 0 if policy found, else an -errno.
*/
-static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
+static int xfrm_policy_match(const struct xfrm_policy *pol,
+ const struct flowi *fl,
u8 type, u16 family, int dir)
{
- struct xfrm_selector *sel = &pol->selector;
- int match, ret = -ESRCH;
+ const struct xfrm_selector *sel = &pol->selector;
+ int ret = -ESRCH;
+ bool match;
if (pol->family != family ||
+ (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
pol->type != type)
return ret;
match = xfrm_selector_match(sel, fl, family);
if (match)
- ret = security_xfrm_policy_lookup(pol, fl->secid, dir);
+ ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
+ dir);
return ret;
}
-static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
+static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
+ const struct flowi *fl,
u16 family, u8 dir)
{
int err;
struct xfrm_policy *pol, *ret;
- xfrm_address_t *daddr, *saddr;
- struct hlist_node *entry;
+ const xfrm_address_t *daddr, *saddr;
struct hlist_head *chain;
u32 priority = ~0U;
@@ -914,10 +979,10 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
if (unlikely(!daddr || !saddr))
return NULL;
- read_lock_bh(&xfrm_policy_lock);
- chain = policy_hash_direct(daddr, saddr, family, dir);
+ read_lock_bh(&net->xfrm.xfrm_policy_lock);
+ chain = policy_hash_direct(net, daddr, saddr, family, dir);
ret = NULL;
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -932,8 +997,8 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
break;
}
}
- chain = &xfrm_policy_inexact[dir];
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ chain = &net->xfrm.policy_inexact[dir];
+ hlist_for_each_entry(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -950,37 +1015,60 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
if (ret)
xfrm_pol_hold(ret);
fail:
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
-static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
- void **objp, atomic_t **obj_refp)
+static struct xfrm_policy *
+__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
+#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_policy *pol;
- int err = 0;
-#ifdef CONFIG_XFRM_SUB_POLICY
- pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
- pol = NULL;
- }
- if (pol || err)
- goto end;
+ pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
+ if (pol != NULL)
+ return pol;
#endif
- pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
- pol = NULL;
+ return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
+}
+
+static int flow_to_policy_dir(int dir)
+{
+ if (XFRM_POLICY_IN == FLOW_DIR_IN &&
+ XFRM_POLICY_OUT == FLOW_DIR_OUT &&
+ XFRM_POLICY_FWD == FLOW_DIR_FWD)
+ return dir;
+
+ switch (dir) {
+ default:
+ case FLOW_DIR_IN:
+ return XFRM_POLICY_IN;
+ case FLOW_DIR_OUT:
+ return XFRM_POLICY_OUT;
+ case FLOW_DIR_FWD:
+ return XFRM_POLICY_FWD;
}
-#ifdef CONFIG_XFRM_SUB_POLICY
-end:
-#endif
- if ((*objp = (void *) pol) != NULL)
- *obj_refp = &pol->refcnt;
- return err;
+}
+
+static struct flow_cache_object *
+xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
+ u8 dir, struct flow_cache_object *old_obj, void *ctx)
+{
+ struct xfrm_policy *pol;
+
+ if (old_obj)
+ xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
+
+ pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
+ if (IS_ERR_OR_NULL(pol))
+ return ERR_CAST(pol);
+
+ /* Resolver returns two references:
+ * one for cache and one for caller of flow_cache_lookup() */
+ xfrm_pol_hold(pol);
+
+ return &pol->flo;
}
static inline int policy_to_flow_dir(int dir)
@@ -1000,19 +1088,26 @@ static inline int policy_to_flow_dir(int dir)
}
}
-static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
+static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
+ const struct flowi *fl)
{
struct xfrm_policy *pol;
+ struct net *net = sock_net(sk);
- read_lock_bh(&xfrm_policy_lock);
+ read_lock_bh(&net->xfrm.xfrm_policy_lock);
if ((pol = sk->sk_policy[dir]) != NULL) {
- int match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl,
+ sk->sk_family);
int err = 0;
if (match) {
- err = security_xfrm_policy_lookup(pol, fl->secid,
- policy_to_flow_dir(dir));
+ if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
+ pol = NULL;
+ goto out;
+ }
+ err = security_xfrm_policy_lookup(pol->security,
+ fl->flowi_secid,
+ policy_to_flow_dir(dir));
if (!err)
xfrm_pol_hold(pol);
else if (err == -ESRCH)
@@ -1022,45 +1117,51 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
} else
pol = NULL;
}
- read_unlock_bh(&xfrm_policy_lock);
+out:
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
- struct hlist_head *chain = policy_hash_bysel(&pol->selector,
+ struct net *net = xp_net(pol);
+ struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
pol->family, dir);
+ list_add(&pol->walk.all, &net->xfrm.policy_all);
hlist_add_head(&pol->bydst, chain);
- hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
- xfrm_policy_count[dir]++;
+ hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
+ net->xfrm.policy_count[dir]++;
xfrm_pol_hold(pol);
- if (xfrm_bydst_should_resize(dir, NULL))
- schedule_work(&xfrm_hash_work);
+ if (xfrm_bydst_should_resize(net, dir, NULL))
+ schedule_work(&net->xfrm.policy_hash_work);
}
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir)
{
+ struct net *net = xp_net(pol);
+
if (hlist_unhashed(&pol->bydst))
return NULL;
- hlist_del(&pol->bydst);
+ hlist_del_init(&pol->bydst);
hlist_del(&pol->byidx);
- xfrm_policy_count[dir]--;
+ list_del(&pol->walk.all);
+ net->xfrm.policy_count[dir]--;
return pol;
}
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
- write_lock_bh(&xfrm_policy_lock);
+ struct net *net = xp_net(pol);
+
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
- if (dir < XFRM_POLICY_MAX)
- atomic_inc(&flow_cache_genid);
xfrm_policy_kill(pol);
return 0;
}
@@ -1070,6 +1171,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
+ struct net *net = xp_net(pol);
struct xfrm_policy *old_pol;
#ifdef CONFIG_XFRM_SUB_POLICY
@@ -1077,17 +1179,24 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
return -EINVAL;
#endif
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
old_pol = sk->sk_policy[dir];
sk->sk_policy[dir] = pol;
if (pol) {
pol->curlft.add_time = get_seconds();
- pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
+ pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
}
- if (old_pol)
+ if (old_pol) {
+ if (pol)
+ xfrm_policy_requeue(old_pol, pol);
+
+ /* Unlinking succeeds always. This is the only function
+ * allowed to delete or replace socket policy.
+ */
__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
- write_unlock_bh(&xfrm_policy_lock);
+ }
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (old_pol) {
xfrm_policy_kill(old_pol);
@@ -1095,18 +1204,21 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
return 0;
}
-static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
+static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
- struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);
+ struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
+ struct net *net = xp_net(old);
if (newp) {
newp->selector = old->selector;
- if (security_xfrm_policy_clone(old, newp)) {
+ if (security_xfrm_policy_clone(old->security,
+ &newp->security)) {
kfree(newp);
return NULL; /* ENOMEM */
}
newp->lft = old->lft;
newp->curlft = old->curlft;
+ newp->mark = old->mark;
newp->action = old->action;
newp->flags = old->flags;
newp->xfrm_nr = old->xfrm_nr;
@@ -1114,9 +1226,9 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
newp->type = old->type;
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_pol_put(newp);
}
return newp;
@@ -1136,7 +1248,7 @@ int __xfrm_sk_clone_policy(struct sock *sk)
}
static int
-xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
+xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
unsigned short family)
{
int err;
@@ -1144,7 +1256,7 @@ xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(local, remote);
+ err = afinfo->get_saddr(net, local, remote);
xfrm_policy_put_afinfo(afinfo);
return err;
}
@@ -1152,17 +1264,17 @@ xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
/* Resolve list of templates for the flow, given policy. */
static int
-xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
- struct xfrm_state **xfrm,
- unsigned short family)
+xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
+ struct xfrm_state **xfrm, unsigned short family)
{
+ struct net *net = xp_net(policy);
int nx;
int i, error;
xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
xfrm_address_t tmp;
- for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
+ for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
struct xfrm_state *x;
xfrm_address_t *remote = daddr;
xfrm_address_t *local = saddr;
@@ -1172,9 +1284,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
- family = tmpl->encap_family;
- if (xfrm_addr_any(local, family)) {
- error = xfrm_get_saddr(&tmp, remote, family);
+ if (xfrm_addr_any(local, tmpl->encap_family)) {
+ error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
if (error)
goto fail;
local = &tmp;
@@ -1193,6 +1304,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
error = (x->km.state == XFRM_STATE_ERROR ?
-EINVAL : -EAGAIN);
xfrm_state_put(x);
+ } else if (error == -ESRCH) {
+ error = -EAGAIN;
}
if (!tmpl->optional)
@@ -1201,15 +1314,14 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
return nx;
fail:
- for (nx--; nx>=0; nx--)
+ for (nx--; nx >= 0; nx--)
xfrm_state_put(xfrm[nx]);
return error;
}
static int
-xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
- struct xfrm_state **xfrm,
- unsigned short family)
+xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
+ struct xfrm_state **xfrm, unsigned short family)
{
struct xfrm_state *tp[XFRM_MAX_DEPTH];
struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
@@ -1239,7 +1351,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
return cnx;
fail:
- for (cnx--; cnx>=0; cnx--)
+ for (cnx--; cnx >= 0; cnx--)
xfrm_state_put(tpp[cnx]);
return error;
@@ -1249,19 +1361,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
* still valid.
*/
-static struct dst_entry *
-xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
-{
- struct dst_entry *x;
- struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
- if (unlikely(afinfo == NULL))
- return ERR_PTR(-EINVAL);
- x = afinfo->find_bundle(fl, policy);
- xfrm_policy_put_afinfo(afinfo);
- return x;
-}
-
-static inline int xfrm_get_tos(struct flowi *fl, int family)
+static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
int tos;
@@ -1276,15 +1376,88 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
return tos;
}
-static inline struct xfrm_dst *xfrm_alloc_dst(int family)
+static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ if (xdst->route == NULL) {
+ /* Dummy bundle - if it has xfrms we were not
+ * able to build bundle as template resolution failed.
+ * It means we need to try again resolving. */
+ if (xdst->num_xfrms > 0)
+ return NULL;
+ } else if (dst->flags & DST_XFRM_QUEUE) {
+ return NULL;
+ } else {
+ /* Real bundle */
+ if (stale_bundle(dst))
+ return NULL;
+ }
+
+ dst_hold(dst);
+ return flo;
+}
+
+static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ if (!xdst->route)
+ return 0;
+ if (stale_bundle(dst))
+ return 0;
+
+ return 1;
+}
+
+static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ dst_free(dst);
+}
+
+static const struct flow_cache_ops xfrm_bundle_fc_ops = {
+ .get = xfrm_bundle_flo_get,
+ .check = xfrm_bundle_flo_check,
+ .delete = xfrm_bundle_flo_delete,
+};
+
+static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+ struct dst_ops *dst_ops;
struct xfrm_dst *xdst;
if (!afinfo)
return ERR_PTR(-EINVAL);
- xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);
+ switch (family) {
+ case AF_INET:
+ dst_ops = &net->xfrm.xfrm4_dst_ops;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ dst_ops = &net->xfrm.xfrm6_dst_ops;
+ break;
+#endif
+ default:
+ BUG();
+ }
+ xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
+
+ if (likely(xdst)) {
+ struct dst_entry *dst = &xdst->u.dst;
+
+ memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
+ xdst->flo.ops = &xfrm_bundle_fc_ops;
+ if (afinfo->init_dst)
+ afinfo->init_dst(net, xdst);
+ } else
+ xdst = ERR_PTR(-ENOBUFS);
xfrm_policy_put_afinfo(afinfo);
@@ -1308,7 +1481,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return err;
}
-static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ const struct flowi *fl)
{
struct xfrm_policy_afinfo *afinfo =
xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1317,24 +1491,27 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
if (!afinfo)
return -EINVAL;
- err = afinfo->fill_dst(xdst, dev);
+ err = afinfo->fill_dst(xdst, dev, fl);
xfrm_policy_put_afinfo(afinfo);
return err;
}
+
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
* all the metrics... Shortly, bundle a bundle.
*/
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
struct xfrm_state **xfrm, int nx,
- struct flowi *fl,
+ const struct flowi *fl,
struct dst_entry *dst)
{
+ struct net *net = xp_net(policy);
unsigned long now = jiffies;
struct net_device *dev;
+ struct xfrm_mode *inner_mode;
struct dst_entry *dst_prev = NULL;
struct dst_entry *dst0 = NULL;
int i = 0;
@@ -1344,6 +1521,9 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
int trailer_len = 0;
int tos;
int family = policy->selector.family;
+ xfrm_address_t saddr, daddr;
+
+ xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
tos = xfrm_get_tos(fl, family);
err = tos;
@@ -1353,7 +1533,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_hold(dst);
for (; i < nx; i++) {
- struct xfrm_dst *xdst = xfrm_alloc_dst(family);
+ struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
struct dst_entry *dst1 = &xdst->u.dst;
err = PTR_ERR(xdst);
@@ -1362,6 +1542,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}
+ if (xfrm[i]->sel.family == AF_UNSPEC) {
+ inner_mode = xfrm_ip2inner_mode(xfrm[i],
+ xfrm_af2proto(family));
+ if (!inner_mode) {
+ err = -EAFNOSUPPORT;
+ dst_release(dst);
+ goto put_states;
+ }
+ } else
+ inner_mode = xfrm[i]->inner_mode;
+
if (!dst_prev)
dst0 = dst1;
else {
@@ -1370,11 +1561,12 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
}
xdst->route = dst;
- memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));
+ dst_copy_metrics(dst1, dst);
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
family = xfrm[i]->props.family;
- dst = xfrm_dst_lookup(xfrm[i], tos, family);
+ dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
+ family);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
@@ -1382,14 +1574,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_hold(dst);
dst1->xfrm = xfrm[i];
- xdst->genid = xfrm[i]->genid;
+ xdst->xfrm_genid = xfrm[i]->genid;
- dst1->obsolete = -1;
+ dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
dst1->flags |= DST_HOST;
dst1->lastuse = now;
dst1->input = dst_discard;
- dst1->output = xfrm[i]->outer_mode->afinfo->output;
+ dst1->output = inner_mode->afinfo->output;
dst1->next = dst_prev;
dst_prev = dst1;
@@ -1408,16 +1600,13 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (!dev)
goto free_dst;
- /* Copy neighbout for reachability confirmation */
- dst0->neighbour = neigh_clone(dst->neighbour);
-
xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
xfrm_init_pmtu(dst_prev);
for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
- err = xfrm_fill_dst(xdst, dev);
+ err = xfrm_fill_dst(xdst, dev, fl);
if (err)
goto free_dst;
@@ -1440,20 +1629,22 @@ free_dst:
goto out;
}
-static int inline
-xfrm_dst_alloc_copy(void **target, void *src, int size)
+#ifdef CONFIG_XFRM_SUB_POLICY
+static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
if (!*target) {
*target = kmalloc(size, GFP_ATOMIC);
if (!*target)
return -ENOMEM;
}
+
memcpy(*target, src, size);
return 0;
}
+#endif
-static int inline
-xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
+static int xfrm_dst_update_parent(struct dst_entry *dst,
+ const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1464,8 +1655,8 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
#endif
}
-static int inline
-xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
+static int xfrm_dst_update_origin(struct dst_entry *dst,
+ const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1475,276 +1666,537 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
#endif
}
-static int stale_bundle(struct dst_entry *dst);
+static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+ struct xfrm_policy **pols,
+ int *num_pols, int *num_xfrms)
+{
+ int i;
-/* Main function: finds/creates a bundle for given flow.
- *
- * At the moment we eat a raw IP route. Mostly to speed up lookups
- * on interfaces with disabled IPsec.
- */
-int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
- struct sock *sk, int flags)
+ if (*num_pols == 0 || !pols[0]) {
+ *num_pols = 0;
+ *num_xfrms = 0;
+ return 0;
+ }
+ if (IS_ERR(pols[0]))
+ return PTR_ERR(pols[0]);
+
+ *num_xfrms = pols[0]->xfrm_nr;
+
+#ifdef CONFIG_XFRM_SUB_POLICY
+ if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
+ pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
+ pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
+ XFRM_POLICY_TYPE_MAIN,
+ fl, family,
+ XFRM_POLICY_OUT);
+ if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ xfrm_pols_put(pols, *num_pols);
+ return PTR_ERR(pols[1]);
+ }
+ (*num_pols)++;
+ (*num_xfrms) += pols[1]->xfrm_nr;
+ }
+ }
+#endif
+ for (i = 0; i < *num_pols; i++) {
+ if (pols[i]->action != XFRM_POLICY_ALLOW) {
+ *num_xfrms = -1;
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+static struct xfrm_dst *
+xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
+ const struct flowi *fl, u16 family,
+ struct dst_entry *dst_orig)
{
- struct xfrm_policy *policy;
- struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
- int npols;
- int pol_dead;
- int xfrm_nr;
- int pi;
+ struct net *net = xp_net(pols[0]);
struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
- struct dst_entry *dst, *dst_orig = *dst_p;
- int nx = 0;
+ struct dst_entry *dst;
+ struct xfrm_dst *xdst;
int err;
- u32 genid;
- u16 family;
- u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
-restart:
- genid = atomic_read(&flow_cache_genid);
- policy = NULL;
- for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
- pols[pi] = NULL;
- npols = 0;
- pol_dead = 0;
- xfrm_nr = 0;
+ /* Try to instantiate a bundle */
+ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
+ if (err <= 0) {
+ if (err != 0 && err != -EAGAIN)
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ return ERR_PTR(err);
+ }
- if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
- policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
- err = PTR_ERR(policy);
- if (IS_ERR(policy)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
- goto dropdst;
- }
+ dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+ if (IS_ERR(dst)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
+ return ERR_CAST(dst);
}
- if (!policy) {
- /* To accelerate a bit... */
- if ((dst_orig->flags & DST_NOXFRM) ||
- !xfrm_policy_count[XFRM_POLICY_OUT])
- goto nopol;
+ xdst = (struct xfrm_dst *)dst;
+ xdst->num_xfrms = err;
+ if (num_pols > 1)
+ err = xfrm_dst_update_parent(dst, &pols[1]->selector);
+ else
+ err = xfrm_dst_update_origin(dst, fl);
+ if (unlikely(err)) {
+ dst_free(dst);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
+ return ERR_PTR(err);
+ }
- policy = flow_cache_lookup(fl, dst_orig->ops->family,
- dir, xfrm_policy_lookup);
- err = PTR_ERR(policy);
- if (IS_ERR(policy)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
- goto dropdst;
- }
+ xdst->num_pols = num_pols;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
+ xdst->policy_genid = atomic_read(&pols[0]->genid);
+
+ return xdst;
+}
+
+static void xfrm_policy_queue_process(unsigned long arg)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct dst_entry *dst;
+ struct xfrm_policy *pol = (struct xfrm_policy *)arg;
+ struct xfrm_policy_queue *pq = &pol->polq;
+ struct flowi fl;
+ struct sk_buff_head list;
+
+ spin_lock(&pq->hold_queue.lock);
+ skb = skb_peek(&pq->hold_queue);
+ if (!skb) {
+ spin_unlock(&pq->hold_queue.lock);
+ goto out;
}
+ dst = skb_dst(skb);
+ sk = skb->sk;
+ xfrm_decode_session(skb, &fl, dst->ops->family);
+ spin_unlock(&pq->hold_queue.lock);
- if (!policy)
- goto nopol;
+ dst_hold(dst->path);
+ dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
+ sk, 0);
+ if (IS_ERR(dst))
+ goto purge_queue;
- family = dst_orig->ops->family;
- pols[0] = policy;
- npols ++;
- xfrm_nr += pols[0]->xfrm_nr;
+ if (dst->flags & DST_XFRM_QUEUE) {
+ dst_release(dst);
- err = -ENOENT;
- if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
- goto error;
+ if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
+ goto purge_queue;
- policy->curlft.use_time = get_seconds();
+ pq->timeout = pq->timeout << 1;
+ if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
+ xfrm_pol_hold(pol);
+ goto out;
+ }
- switch (policy->action) {
- default:
- case XFRM_POLICY_BLOCK:
- /* Prohibit the flow */
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
- err = -EPERM;
- goto error;
+ dst_release(dst);
- case XFRM_POLICY_ALLOW:
-#ifndef CONFIG_XFRM_SUB_POLICY
- if (policy->xfrm_nr == 0) {
- /* Flow passes not transformed. */
- xfrm_pol_put(policy);
- return 0;
- }
-#endif
+ __skb_queue_head_init(&list);
- /* Try to find matching bundle.
- *
- * LATER: help from flow cache. It is optional, this
- * is required only for output policy.
- */
- dst = xfrm_find_bundle(fl, policy, family);
+ spin_lock(&pq->hold_queue.lock);
+ pq->timeout = 0;
+ skb_queue_splice_init(&pq->hold_queue, &list);
+ spin_unlock(&pq->hold_queue.lock);
+
+ while (!skb_queue_empty(&list)) {
+ skb = __skb_dequeue(&list);
+
+ xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
+ dst_hold(skb_dst(skb)->path);
+ dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
+ &fl, skb->sk, 0);
if (IS_ERR(dst)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- err = PTR_ERR(dst);
- goto error;
+ kfree_skb(skb);
+ continue;
}
- if (dst)
- break;
+ nf_reset(skb);
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
-#ifdef CONFIG_XFRM_SUB_POLICY
- if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
- pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
- fl, family,
- XFRM_POLICY_OUT);
- if (pols[1]) {
- if (IS_ERR(pols[1])) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
- err = PTR_ERR(pols[1]);
- goto error;
- }
- if (pols[1]->action == XFRM_POLICY_BLOCK) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
- err = -EPERM;
- goto error;
- }
- npols ++;
- xfrm_nr += pols[1]->xfrm_nr;
- }
- }
+ err = dst_output(skb);
+ }
- /*
- * Because neither flowi nor bundle information knows about
- * transformation template size. On more than one policy usage
- * we can realize whether all of them is bypass or not after
- * they are searched. See above not-transformed bypass
- * is surrounded by non-sub policy configuration, too.
- */
- if (xfrm_nr == 0) {
- /* Flow passes not transformed. */
- xfrm_pols_put(pols, npols);
- return 0;
- }
+out:
+ xfrm_pol_put(pol);
+ return;
-#endif
- nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
-
- if (unlikely(nx<0)) {
- err = nx;
- if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route.
- */
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
- xfrm_pol_put(policy);
- return -EREMOTE;
- }
- if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
- DECLARE_WAITQUEUE(wait, current);
+purge_queue:
+ pq->timeout = 0;
+ xfrm_queue_purge(&pq->hold_queue);
+ xfrm_pol_put(pol);
+}
- add_wait_queue(&km_waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&km_waitq, &wait);
+static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
+{
+ unsigned long sched_next;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+ struct xfrm_policy *pol = xdst->pols[0];
+ struct xfrm_policy_queue *pq = &pol->polq;
+ const struct sk_buff *fclone = skb + 1;
- nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
+ if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+ fclone->fclone == SKB_FCLONE_CLONE)) {
+ kfree_skb(skb);
+ return 0;
+ }
- if (nx == -EAGAIN && signal_pending(current)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
- err = -ERESTART;
- goto error;
- }
- if (nx == -EAGAIN ||
- genid != atomic_read(&flow_cache_genid)) {
- xfrm_pols_put(pols, npols);
- goto restart;
- }
- err = nx;
- }
- if (err < 0) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
- goto error;
- }
- }
- if (nx == 0) {
- /* Flow passes not transformed. */
- xfrm_pols_put(pols, npols);
- return 0;
- }
+ if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
+ kfree_skb(skb);
+ return -EAGAIN;
+ }
- dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
- err = PTR_ERR(dst);
- if (IS_ERR(dst)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLEGENERROR);
- goto error;
- }
+ skb_dst_force(skb);
- for (pi = 0; pi < npols; pi++) {
- read_lock_bh(&pols[pi]->lock);
- pol_dead |= pols[pi]->dead;
- read_unlock_bh(&pols[pi]->lock);
- }
+ spin_lock_bh(&pq->hold_queue.lock);
- write_lock_bh(&policy->lock);
- if (unlikely(pol_dead || stale_bundle(dst))) {
- /* Wow! While we worked on resolving, this
- * policy has gone. Retry. It is not paranoia,
- * we just cannot enlist new bundle to dead object.
- * We can't enlist stable bundles either.
- */
- write_unlock_bh(&policy->lock);
- if (dst)
- dst_free(dst);
-
- if (pol_dead)
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD);
- else
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- err = -EHOSTUNREACH;
- goto error;
- }
+ if (!pq->timeout)
+ pq->timeout = XFRM_QUEUE_TMO_MIN;
- if (npols > 1)
- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
- else
- err = xfrm_dst_update_origin(dst, fl);
- if (unlikely(err)) {
- write_unlock_bh(&policy->lock);
- if (dst)
- dst_free(dst);
- XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- goto error;
- }
+ sched_next = jiffies + pq->timeout;
- dst->next = policy->bundles;
- policy->bundles = dst;
- dst_hold(dst);
- write_unlock_bh(&policy->lock);
+ if (del_timer(&pq->hold_timer)) {
+ if (time_before(pq->hold_timer.expires, sched_next))
+ sched_next = pq->hold_timer.expires;
+ xfrm_pol_put(pol);
}
- *dst_p = dst;
- dst_release(dst_orig);
- xfrm_pols_put(pols, npols);
+
+ __skb_queue_tail(&pq->hold_queue, skb);
+ if (!mod_timer(&pq->hold_timer, sched_next))
+ xfrm_pol_hold(pol);
+
+ spin_unlock_bh(&pq->hold_queue.lock);
+
return 0;
+}
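
Taken together, xdst_queue_output() parks packets on the policy's hold queue and arms the hold timer, while xfrm_policy_queue_process() retries the lookup when the timer fires, doubling the timeout until a real bundle appears or the cap is reached. A rough, illustrative sketch of that backoff, folding the two functions' timeout handling into one hypothetical helper (it uses only the XFRM_QUEUE_TMO_* constants referenced above and is not part of the patch):

/* Hypothetical helper, for illustration only: next hold-timer delay in
 * jiffies. Returns 0 when the queue should be purged instead of re-armed. */
static unsigned long xfrm_queue_next_tmo(unsigned long timeout)
{
	if (!timeout)
		return XFRM_QUEUE_TMO_MIN;	/* first packet queued */
	if (timeout >= XFRM_QUEUE_TMO_MAX)
		return 0;			/* cap reached: purge the hold queue */
	return timeout << 1;			/* exponential backoff on each expiry */
}
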
+
+static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+ struct dst_entry *dst,
+ const struct flowi *fl,
+ int num_xfrms,
+ u16 family)
+{
+ int err;
+ struct net_device *dev;
+ struct dst_entry *dst1;
+ struct xfrm_dst *xdst;
+
+ xdst = xfrm_alloc_dst(net, family);
+ if (IS_ERR(xdst))
+ return xdst;
+
+ if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
+ return xdst;
+
+ dst1 = &xdst->u.dst;
+ dst_hold(dst);
+ xdst->route = dst;
+
+ dst_copy_metrics(dst1, dst);
+
+ dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
+ dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
+ dst1->lastuse = jiffies;
+
+ dst1->input = dst_discard;
+ dst1->output = xdst_queue_output;
+
+ dst_hold(dst);
+ dst1->child = dst;
+ dst1->path = dst;
+
+ xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
+
+ err = -ENODEV;
+ dev = dst->dev;
+ if (!dev)
+ goto free_dst;
+
+ err = xfrm_fill_dst(xdst, dev, fl);
+ if (err)
+ goto free_dst;
+
+out:
+ return xdst;
+
+free_dst:
+ dst_release(dst1);
+ xdst = ERR_PTR(err);
+ goto out;
+}
+
+static struct flow_cache_object *
+xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
+ struct flow_cache_object *oldflo, void *ctx)
+{
+ struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+ struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ struct xfrm_dst *xdst, *new_xdst;
+ int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
+
+ /* Check if the policies from old bundle are usable */
+ xdst = NULL;
+ if (oldflo) {
+ xdst = container_of(oldflo, struct xfrm_dst, flo);
+ num_pols = xdst->num_pols;
+ num_xfrms = xdst->num_xfrms;
+ pol_dead = 0;
+ for (i = 0; i < num_pols; i++) {
+ pols[i] = xdst->pols[i];
+ pol_dead |= pols[i]->walk.dead;
+ }
+ if (pol_dead) {
+ dst_free(&xdst->u.dst);
+ xdst = NULL;
+ num_pols = 0;
+ num_xfrms = 0;
+ oldflo = NULL;
+ }
+ }
+ /* Resolve policies to use if we couldn't get them from
+ * previous cache entry */
+ if (xdst == NULL) {
+ num_pols = 1;
+ pols[0] = __xfrm_policy_lookup(net, fl, family,
+ flow_to_policy_dir(dir));
+ err = xfrm_expand_policies(fl, family, pols,
+ &num_pols, &num_xfrms);
+ if (err < 0)
+ goto inc_error;
+ if (num_pols == 0)
+ return NULL;
+ if (num_xfrms <= 0)
+ goto make_dummy_bundle;
+ }
+
+ new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+ if (IS_ERR(new_xdst)) {
+ err = PTR_ERR(new_xdst);
+ if (err != -EAGAIN)
+ goto error;
+ if (oldflo == NULL)
+ goto make_dummy_bundle;
+ dst_hold(&xdst->u.dst);
+ return oldflo;
+ } else if (new_xdst == NULL) {
+ num_xfrms = 0;
+ if (oldflo == NULL)
+ goto make_dummy_bundle;
+ xdst->num_xfrms = 0;
+ dst_hold(&xdst->u.dst);
+ return oldflo;
+ }
+
+ /* Kill the previous bundle */
+ if (xdst) {
+ /* The policies were stolen for newly generated bundle */
+ xdst->num_pols = 0;
+ dst_free(&xdst->u.dst);
+ }
+
+ /* The flow cache does not hold a reference; it dst_free()'s the
+ * entry, but we do need to return one reference for the original caller */
+ dst_hold(&new_xdst->u.dst);
+ return &new_xdst->flo;
+
+make_dummy_bundle:
+ /* We found policies, but there are no bundles to instantiate:
+ * either the policy blocks, it has no transformations, or
+ * we could not build a template (no xfrm_states). */
+ xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
+ if (IS_ERR(xdst)) {
+ xfrm_pols_put(pols, num_pols);
+ return ERR_CAST(xdst);
+ }
+ xdst->num_pols = num_pols;
+ xdst->num_xfrms = num_xfrms;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
+
+ dst_hold(&xdst->u.dst);
+ return &xdst->flo;
+
+inc_error:
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
- xfrm_pols_put(pols, npols);
-dropdst:
- dst_release(dst_orig);
- *dst_p = NULL;
- return err;
+ if (xdst != NULL)
+ dst_free(&xdst->u.dst);
+ else
+ xfrm_pols_put(pols, num_pols);
+ return ERR_PTR(err);
+}
-nopol:
- err = -ENOENT;
- if (flags & XFRM_LOOKUP_ICMP)
- goto dropdst;
- return 0;
+static struct dst_entry *make_blackhole(struct net *net, u16 family,
+ struct dst_entry *dst_orig)
+{
+ struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+ struct dst_entry *ret;
+
+ if (!afinfo) {
+ dst_release(dst_orig);
+ return ERR_PTR(-EINVAL);
+ } else {
+ ret = afinfo->blackhole_route(net, dst_orig);
+ }
+ xfrm_policy_put_afinfo(afinfo);
+
+ return ret;
}
-EXPORT_SYMBOL(__xfrm_lookup);
-int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
- struct sock *sk, int flags)
+/* Main function: finds/creates a bundle for given flow.
+ *
+ * At the moment we eat a raw IP route. Mostly to speed up lookups
+ * on interfaces with disabled IPsec.
+ */
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ struct sock *sk, int flags)
{
- int err = __xfrm_lookup(dst_p, fl, sk, flags);
+ struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ struct flow_cache_object *flo;
+ struct xfrm_dst *xdst;
+ struct dst_entry *dst, *route;
+ u16 family = dst_orig->ops->family;
+ u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
+ int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
+
+ dst = NULL;
+ xdst = NULL;
+ route = NULL;
+
+ if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
+ num_pols = 1;
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ err = xfrm_expand_policies(fl, family, pols,
+ &num_pols, &num_xfrms);
+ if (err < 0)
+ goto dropdst;
+
+ if (num_pols) {
+ if (num_xfrms <= 0) {
+ drop_pols = num_pols;
+ goto no_transform;
+ }
+
+ xdst = xfrm_resolve_and_create_bundle(
+ pols, num_pols, fl,
+ family, dst_orig);
+ if (IS_ERR(xdst)) {
+ xfrm_pols_put(pols, num_pols);
+ err = PTR_ERR(xdst);
+ goto dropdst;
+ } else if (xdst == NULL) {
+ num_xfrms = 0;
+ drop_pols = num_pols;
+ goto no_transform;
+ }
+
+ dst_hold(&xdst->u.dst);
+ xdst->u.dst.flags |= DST_NOCACHE;
+ route = xdst->route;
+ }
+ }
+
+ if (xdst == NULL) {
+ /* To accelerate a bit... */
+ if ((dst_orig->flags & DST_NOXFRM) ||
+ !net->xfrm.policy_count[XFRM_POLICY_OUT])
+ goto nopol;
+
+ flo = flow_cache_lookup(net, fl, family, dir,
+ xfrm_bundle_lookup, dst_orig);
+ if (flo == NULL)
+ goto nopol;
+ if (IS_ERR(flo)) {
+ err = PTR_ERR(flo);
+ goto dropdst;
+ }
+ xdst = container_of(flo, struct xfrm_dst, flo);
+
+ num_pols = xdst->num_pols;
+ num_xfrms = xdst->num_xfrms;
+ memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
+ route = xdst->route;
+ }
+
+ dst = &xdst->u.dst;
+ if (route == NULL && num_xfrms > 0) {
+ /* The only case when xfrm_bundle_lookup() returns a
+ * bundle with a null route is when the template could
+ * not be resolved. It means policies are there, but the
+ * bundle could not be created, since we don't yet have
+ * the xfrm_states. We need to wait for the KM to
+ * negotiate new SAs or bail out with an error. */
+ if (net->xfrm.sysctl_larval_drop) {
+ dst_release(dst);
+ xfrm_pols_put(pols, drop_pols);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+
+ return make_blackhole(net, family, dst_orig);
+ }
- if (err == -EREMOTE) {
- dst_release(*dst_p);
- *dst_p = NULL;
err = -EAGAIN;
+
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ goto error;
}
- return err;
+no_transform:
+ if (num_pols == 0)
+ goto nopol;
+
+ if ((flags & XFRM_LOOKUP_ICMP) &&
+ !(pols[0]->flags & XFRM_POLICY_ICMP)) {
+ err = -ENOENT;
+ goto error;
+ }
+
+ for (i = 0; i < num_pols; i++)
+ pols[i]->curlft.use_time = get_seconds();
+
+ if (num_xfrms < 0) {
+ /* Prohibit the flow */
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
+ err = -EPERM;
+ goto error;
+ } else if (num_xfrms > 0) {
+ /* Flow transformed */
+ dst_release(dst_orig);
+ } else {
+ /* Flow passes untransformed */
+ dst_release(dst);
+ dst = dst_orig;
+ }
+ok:
+ xfrm_pols_put(pols, drop_pols);
+ if (dst && dst->xfrm &&
+ dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+ dst->flags |= DST_XFRM_TUNNEL;
+ return dst;
+
+nopol:
+ if (!(flags & XFRM_LOOKUP_ICMP)) {
+ dst = dst_orig;
+ goto ok;
+ }
+ err = -ENOENT;
+error:
+ dst_release(dst);
+dropdst:
+ dst_release(dst_orig);
+ xfrm_pols_put(pols, drop_pols);
+ return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
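
With this signature a caller hands in the plain route and gets back either a transformed dst, the original route when no transformation applies, or an ERR_PTR() (including the blackhole / -EAGAIN cases above). A hedged caller sketch, modelled on the forwarding path further down; net, fl and skb are assumed to exist in the caller:

	struct dst_entry *dst;

	skb_dst_force(skb);
	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst))
		return 0;		/* lookup failed; drop the packet */
	skb_dst_set(skb, dst);		/* either the original route or an xfrm bundle */
	return 1;
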
static inline int
-xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
+xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
struct xfrm_state *x;
@@ -1763,7 +2215,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
*/
static inline int
-xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
+xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
unsigned short family)
{
if (xfrm_state_kern(x))
@@ -1772,7 +2224,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
x->props.mode == tmpl->mode &&
- ((tmpl->aalgos & (1<<x->props.aalgo)) ||
+ (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
!(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
!(x->props.mode != XFRM_MODE_TRANSPORT &&
xfrm_state_addr_cmp(tmpl, x, family));
@@ -1786,7 +2238,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
* Otherwise "-2 - errored_index" is returned.
*/
static inline int
-xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
+xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
unsigned short family)
{
int idx = start;
@@ -1818,13 +2270,13 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
return -EAFNOSUPPORT;
afinfo->decode_session(skb, fl, reverse);
- err = security_xfrm_decode_session(skb, &fl->secid);
+ err = security_xfrm_decode_session(skb, &fl->flowi_secid);
xfrm_policy_put_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
-static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
+static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
for (; k < sp->len; k++) {
if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
@@ -1839,6 +2291,7 @@ static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
unsigned short family)
{
+ struct net *net = dev_net(skb->dev);
struct xfrm_policy *pol;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int npols = 0;
@@ -1854,7 +2307,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
fl_dir = policy_to_flow_dir(dir);
if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
return 0;
}
@@ -1864,10 +2317,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (skb->sp) {
int i;
- for (i=skb->sp->len-1; i>=0; i--) {
+ for (i = skb->sp->len-1; i >= 0; i--) {
struct xfrm_state *x = skb->sp->xvec[i];
if (!xfrm_selector_match(&x->sel, &fl, family)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
return 0;
}
}
@@ -1877,24 +2330,31 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (sk && sk->sk_policy[dir]) {
pol = xfrm_sk_policy_lookup(sk, dir, &fl);
if (IS_ERR(pol)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
}
}
- if (!pol)
- pol = flow_cache_lookup(&fl, family, fl_dir,
- xfrm_policy_lookup);
+ if (!pol) {
+ struct flow_cache_object *flo;
+
+ flo = flow_cache_lookup(net, &fl, family, fl_dir,
+ xfrm_policy_lookup, NULL);
+ if (IS_ERR_OR_NULL(flo))
+ pol = ERR_CAST(flo);
+ else
+ pol = container_of(flo, struct xfrm_policy, flo);
+ }
if (IS_ERR(pol)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
}
if (!pol) {
if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
xfrm_secpath_reject(xerr_idx, skb, &fl);
- XFRM_INC_STATS(LINUX_MIB_XFRMINNOPOLS);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
}
return 1;
@@ -1903,19 +2363,19 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
pol->curlft.use_time = get_seconds();
pols[0] = pol;
- npols ++;
+ npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
- pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
+ pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
&fl, family,
XFRM_POLICY_IN);
if (pols[1]) {
if (IS_ERR(pols[1])) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
}
pols[1]->curlft.use_time = get_seconds();
- npols ++;
+ npols++;
}
}
#endif
@@ -1935,11 +2395,11 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
for (pi = 0; pi < npols; pi++) {
if (pols[pi] != pol &&
pols[pi]->action != XFRM_POLICY_ALLOW) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
goto reject;
}
if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINBUFFERERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto reject_error;
}
for (i = 0; i < pols[pi]->xfrm_nr; i++)
@@ -1947,7 +2407,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
xfrm_nr = ti;
if (npols > 1) {
- xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
+ xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
tpp = stp;
}
@@ -1963,20 +2423,20 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (k < -1)
/* "-2 - errored_index" returned */
xerr_idx = -(2+k);
- XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
goto reject;
}
}
if (secpath_has_nontransport(sp, k, &xerr_idx)) {
- XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
goto reject;
}
xfrm_pols_put(pols, npols);
return 1;
}
- XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
reject:
xfrm_secpath_reject(xerr_idx, skb, &fl);
@@ -1988,15 +2448,25 @@ EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
+ struct net *net = dev_net(skb->dev);
struct flowi fl;
+ struct dst_entry *dst;
+ int res = 1;
if (xfrm_decode_session(skb, &fl, family) < 0) {
- /* XXX: we should have something like FWDHDRERROR here. */
- XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
return 0;
}
- return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
+ skb_dst_force(skb);
+
+ dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+ if (IS_ERR(dst)) {
+ res = 0;
+ dst = NULL;
+ }
+ skb_dst_set(skb, dst);
+ return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
@@ -2005,12 +2475,13 @@ EXPORT_SYMBOL(__xfrm_route_forward);
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
- * to "-1" to force all XFRM destinations to get validated by
- * dst_ops->check on every use. We do this because when a
- * normal route referenced by an XFRM dst is obsoleted we do
- * not go looking around for all parent referencing XFRM dsts
- * so that we can invalidate them. It is just too much work.
- * Instead we make the checks here on every use. For example:
+ * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
+ * get validated by dst_ops->check on every use. We do this
+ * because when a normal route referenced by an XFRM dst is
+ * obsoleted we do not go looking around for all parent
+ * referencing XFRM dsts so that we can invalidate them. It
+ * is just too much work. Instead we make the checks here on
+ * every use. For example:
*
* XFRM dst A --> IPv4 dst X
*
@@ -2020,9 +2491,9 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
* stale_bundle() check.
*
* When a policy's bundle is pruned, we dst_free() the XFRM
- * dst which causes it's ->obsolete field to be set to a
- * positive non-zero integer. If an XFRM dst has been pruned
- * like this, we want to force a new route lookup.
+ * dst which causes its ->obsolete field to be set to
+ * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
+ * this, we want to force a new route lookup.
*/
if (dst->obsolete < 0 && !stale_bundle(dst))
return dst;
@@ -2032,13 +2503,13 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
static int stale_bundle(struct dst_entry *dst)
{
- return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
+ return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
- dst->dev = dev->nd_net->loopback_dev;
+ dst->dev = dev_net(dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
}
@@ -2048,7 +2519,6 @@ EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
/* Impossible. Such dst must be popped before reaches point of failure. */
- return;
}
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
@@ -2062,69 +2532,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
return dst;
}
-static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
+void xfrm_garbage_collect(struct net *net)
{
- struct dst_entry *dst, **dstp;
-
- write_lock(&pol->lock);
- dstp = &pol->bundles;
- while ((dst=*dstp) != NULL) {
- if (func(dst)) {
- *dstp = dst->next;
- dst->next = *gc_list_p;
- *gc_list_p = dst;
- } else {
- dstp = &dst->next;
- }
- }
- write_unlock(&pol->lock);
+ flow_cache_flush(net);
}
+EXPORT_SYMBOL(xfrm_garbage_collect);
-static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
+static void xfrm_garbage_collect_deferred(struct net *net)
{
- struct dst_entry *gc_list = NULL;
- int dir;
-
- read_lock_bh(&xfrm_policy_lock);
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
- struct xfrm_policy *pol;
- struct hlist_node *entry;
- struct hlist_head *table;
- int i;
-
- hlist_for_each_entry(pol, entry,
- &xfrm_policy_inexact[dir], bydst)
- prune_one_bundle(pol, func, &gc_list);
-
- table = xfrm_policy_bydst[dir].table;
- for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol, entry, table + i, bydst)
- prune_one_bundle(pol, func, &gc_list);
- }
- }
- read_unlock_bh(&xfrm_policy_lock);
-
- while (gc_list) {
- struct dst_entry *dst = gc_list;
- gc_list = dst->next;
- dst_free(dst);
- }
-}
-
-static int unused_bundle(struct dst_entry *dst)
-{
- return !atomic_read(&dst->__refcnt);
-}
-
-static void __xfrm_garbage_collect(void)
-{
- xfrm_prune_bundles(unused_bundle);
-}
-
-static int xfrm_flush_bundles(void)
-{
- xfrm_prune_bundles(stale_bundle);
- return 0;
+ flow_cache_flush_deferred(net);
}
static void xfrm_init_pmtu(struct dst_entry *dst)
@@ -2144,7 +2560,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
if (pmtu > route_mtu_cached)
pmtu = route_mtu_cached;
- dst->metrics[RTAX_MTU-1] = pmtu;
+ dst_metric_set(dst, RTAX_MTU, pmtu);
} while ((dst = dst->next));
}
@@ -2152,8 +2568,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
* still valid.
*/
-int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
- struct flowi *fl, int family, int strict)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
{
struct dst_entry *dst = &first->u.dst;
struct xfrm_dst *last;
@@ -2162,34 +2577,21 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
(dst->dev && !netif_running(dst->dev)))
return 0;
-#ifdef CONFIG_XFRM_SUB_POLICY
- if (fl) {
- if (first->origin && !flow_cache_uli_match(first->origin, fl))
- return 0;
- if (first->partner &&
- !xfrm_selector_match(first->partner, fl, family))
- return 0;
- }
-#endif
+
+ if (dst->flags & DST_XFRM_QUEUE)
+ return 1;
last = NULL;
do {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
- return 0;
- if (fl && pol &&
- !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
- return 0;
if (dst->xfrm->km.state != XFRM_STATE_VALID)
return 0;
- if (xdst->genid != dst->xfrm->genid)
+ if (xdst->xfrm_genid != dst->xfrm->genid)
return 0;
-
- if (strict && fl &&
- !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
- !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
+ if (xdst->num_pols > 0 &&
+ xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
mtu = dst_mtu(dst->child);
@@ -2219,7 +2621,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
mtu = xfrm_state_mtu(dst->xfrm, mtu);
if (mtu > last->route_mtu_cached)
mtu = last->route_mtu_cached;
- dst->metrics[RTAX_MTU-1] = mtu;
+ dst_metric_set(dst, RTAX_MTU, mtu);
if (last == first)
break;
@@ -2231,16 +2633,34 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
return 1;
}
-EXPORT_SYMBOL(xfrm_bundle_ok);
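
The open-coded dst->metrics[RTAX_MTU-1] stores in this file are replaced above by dst_metric_set(); route metrics may be shared between dst entries, so writes are expected to go through the accessor, which unshares them as needed. A small illustrative fragment, with dst assumed to be a valid bundle entry:

	u32 mtu = dst_metric_raw(dst, RTAX_MTU);	/* 0 when no explicit MTU metric is set */

	dst_metric_set(dst, RTAX_MTU, mtu ? : dst_mtu(dst->path));
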
+static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
+{
+ return dst_metric_advmss(dst->path);
+}
+
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
+{
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst_mtu(dst->path);
+}
+
+static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
+{
+ return dst->path->ops->neigh_lookup(dst, skb, daddr);
+}
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
+ struct net *net;
int err = 0;
if (unlikely(afinfo == NULL))
return -EINVAL;
if (unlikely(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
- write_lock_bh(&xfrm_policy_afinfo_lock);
+ spin_lock(&xfrm_policy_afinfo_lock);
if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
err = -ENOBUFS;
else {
@@ -2249,15 +2669,42 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
dst_ops->kmem_cachep = xfrm_dst_cache;
if (likely(dst_ops->check == NULL))
dst_ops->check = xfrm_dst_check;
+ if (likely(dst_ops->default_advmss == NULL))
+ dst_ops->default_advmss = xfrm_default_advmss;
+ if (likely(dst_ops->mtu == NULL))
+ dst_ops->mtu = xfrm_mtu;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
dst_ops->link_failure = xfrm_link_failure;
+ if (likely(dst_ops->neigh_lookup == NULL))
+ dst_ops->neigh_lookup = xfrm_neigh_lookup;
if (likely(afinfo->garbage_collect == NULL))
- afinfo->garbage_collect = __xfrm_garbage_collect;
- xfrm_policy_afinfo[afinfo->family] = afinfo;
+ afinfo->garbage_collect = xfrm_garbage_collect_deferred;
+ rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
}
- write_unlock_bh(&xfrm_policy_afinfo_lock);
+ spin_unlock(&xfrm_policy_afinfo_lock);
+
+ rtnl_lock();
+ for_each_net(net) {
+ struct dst_ops *xfrm_dst_ops;
+
+ switch (afinfo->family) {
+ case AF_INET:
+ xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
+ break;
+#endif
+ default:
+ BUG();
+ }
+ *xfrm_dst_ops = *afinfo->dst_ops;
+ }
+ rtnl_unlock();
+
return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@@ -2269,78 +2716,97 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
return -EINVAL;
if (unlikely(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
- write_lock_bh(&xfrm_policy_afinfo_lock);
+ spin_lock(&xfrm_policy_afinfo_lock);
if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
err = -EINVAL;
- else {
- struct dst_ops *dst_ops = afinfo->dst_ops;
- xfrm_policy_afinfo[afinfo->family] = NULL;
- dst_ops->kmem_cachep = NULL;
- dst_ops->check = NULL;
- dst_ops->negative_advice = NULL;
- dst_ops->link_failure = NULL;
- afinfo->garbage_collect = NULL;
- }
+ else
+ RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
+ NULL);
+ }
+ spin_unlock(&xfrm_policy_afinfo_lock);
+ if (!err) {
+ struct dst_ops *dst_ops = afinfo->dst_ops;
+
+ synchronize_rcu();
+
+ dst_ops->kmem_cachep = NULL;
+ dst_ops->check = NULL;
+ dst_ops->negative_advice = NULL;
+ dst_ops->link_failure = NULL;
+ afinfo->garbage_collect = NULL;
}
- write_unlock_bh(&xfrm_policy_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+static void __net_init xfrm_dst_ops_init(struct net *net)
{
struct xfrm_policy_afinfo *afinfo;
- if (unlikely(family >= NPROTO))
- return NULL;
- read_lock(&xfrm_policy_afinfo_lock);
- afinfo = xfrm_policy_afinfo[family];
- if (unlikely(!afinfo))
- read_unlock(&xfrm_policy_afinfo_lock);
- return afinfo;
-}
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
-{
- read_unlock(&xfrm_policy_afinfo_lock);
+ rcu_read_lock();
+ afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
+ if (afinfo)
+ net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+ afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
+ if (afinfo)
+ net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
+#endif
+ rcu_read_unlock();
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
-
- if (dev->nd_net != &init_net)
- return NOTIFY_DONE;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_DOWN:
- xfrm_flush_bundles();
+ xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}
static struct notifier_block xfrm_dev_notifier = {
- xfrm_dev_event,
- NULL,
- 0
+ .notifier_call = xfrm_dev_event,
};
#ifdef CONFIG_XFRM_STATISTICS
-static int __init xfrm_statistics_init(void)
+static int __net_init xfrm_statistics_init(struct net *net)
{
- if (snmp_mib_init((void **)xfrm_statistics,
- sizeof(struct linux_xfrm_mib)) < 0)
+ int rv;
+ net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
+ if (!net->mib.xfrm_statistics)
return -ENOMEM;
+ rv = xfrm_proc_init(net);
+ if (rv < 0)
+ free_percpu(net->mib.xfrm_statistics);
+ return rv;
+}
+
+static void xfrm_statistics_fini(struct net *net)
+{
+ xfrm_proc_fini(net);
+ free_percpu(net->mib.xfrm_statistics);
+}
+#else
+static int __net_init xfrm_statistics_init(struct net *net)
+{
return 0;
}
+
+static void xfrm_statistics_fini(struct net *net)
+{
+}
#endif
-static void __init xfrm_policy_init(void)
+static int __net_init xfrm_policy_init(struct net *net)
{
unsigned int hmask, sz;
int dir;
- xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
+ if (net_eq(net, &init_net))
+ xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
sizeof(struct xfrm_dst),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
@@ -2348,38 +2814,129 @@ static void __init xfrm_policy_init(void)
hmask = 8 - 1;
sz = (hmask+1) * sizeof(struct hlist_head);
- xfrm_policy_byidx = xfrm_hash_alloc(sz);
- xfrm_idx_hmask = hmask;
- if (!xfrm_policy_byidx)
- panic("XFRM: failed to allocate byidx hash\n");
+ net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
+ if (!net->xfrm.policy_byidx)
+ goto out_byidx;
+ net->xfrm.policy_idx_hmask = hmask;
for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
struct xfrm_policy_hash *htab;
- INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);
+ net->xfrm.policy_count[dir] = 0;
+ INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
- htab = &xfrm_policy_bydst[dir];
+ htab = &net->xfrm.policy_bydst[dir];
htab->table = xfrm_hash_alloc(sz);
- htab->hmask = hmask;
if (!htab->table)
- panic("XFRM: failed to allocate bydst hash\n");
+ goto out_bydst;
+ htab->hmask = hmask;
}
- INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
- register_netdevice_notifier(&xfrm_dev_notifier);
+ INIT_LIST_HEAD(&net->xfrm.policy_all);
+ INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
+ if (net_eq(net, &init_net))
+ register_netdevice_notifier(&xfrm_dev_notifier);
+ return 0;
+
+out_bydst:
+ for (dir--; dir >= 0; dir--) {
+ struct xfrm_policy_hash *htab;
+
+ htab = &net->xfrm.policy_bydst[dir];
+ xfrm_hash_free(htab->table, sz);
+ }
+ xfrm_hash_free(net->xfrm.policy_byidx, sz);
+out_byidx:
+ return -ENOMEM;
}
-void __init xfrm_init(void)
+static void xfrm_policy_fini(struct net *net)
{
-#ifdef CONFIG_XFRM_STATISTICS
- xfrm_statistics_init();
+ unsigned int sz;
+ int dir;
+
+ flush_work(&net->xfrm.policy_hash_work);
+#ifdef CONFIG_XFRM_SUB_POLICY
+ xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
- xfrm_state_init();
- xfrm_policy_init();
+ xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
+
+ WARN_ON(!list_empty(&net->xfrm.policy_all));
+
+ for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+ struct xfrm_policy_hash *htab;
+
+ WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
+
+ htab = &net->xfrm.policy_bydst[dir];
+ sz = (htab->hmask + 1) * sizeof(struct hlist_head);
+ WARN_ON(!hlist_empty(htab->table));
+ xfrm_hash_free(htab->table, sz);
+ }
+
+ sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
+ WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
+ xfrm_hash_free(net->xfrm.policy_byidx, sz);
+}
+
+static int __net_init xfrm_net_init(struct net *net)
+{
+ int rv;
+
+ rv = xfrm_statistics_init(net);
+ if (rv < 0)
+ goto out_statistics;
+ rv = xfrm_state_init(net);
+ if (rv < 0)
+ goto out_state;
+ rv = xfrm_policy_init(net);
+ if (rv < 0)
+ goto out_policy;
+ xfrm_dst_ops_init(net);
+ rv = xfrm_sysctl_init(net);
+ if (rv < 0)
+ goto out_sysctl;
+ rv = flow_cache_init(net);
+ if (rv < 0)
+ goto out;
+
+ /* Initialize the per-net locks here */
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
+ rwlock_init(&net->xfrm.xfrm_policy_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
+ return 0;
+
+out:
+ xfrm_sysctl_fini(net);
+out_sysctl:
+ xfrm_policy_fini(net);
+out_policy:
+ xfrm_state_fini(net);
+out_state:
+ xfrm_statistics_fini(net);
+out_statistics:
+ return rv;
+}
+
+static void __net_exit xfrm_net_exit(struct net *net)
+{
+ flow_cache_fini(net);
+ xfrm_sysctl_fini(net);
+ xfrm_policy_fini(net);
+ xfrm_state_fini(net);
+ xfrm_statistics_fini(net);
+}
+
+static struct pernet_operations __net_initdata xfrm_net_ops = {
+ .init = xfrm_net_init,
+ .exit = xfrm_net_exit,
+};
+
+void __init xfrm_init(void)
+{
+ register_pernet_subsys(&xfrm_net_ops);
xfrm_input_init();
-#ifdef CONFIG_XFRM_STATISTICS
- xfrm_proc_init();
-#endif
}
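
xfrm_init() is now just the boot-time hook; everything per-namespace runs through the pernet_operations registered here, with xfrm_net_init()/xfrm_net_exit() called as each struct net is created and destroyed. For orientation, a generic sketch of that pattern follows; the foo_* names are hypothetical and not part of this file:

static int __net_init foo_net_init(struct net *net)
{
	/* allocate and initialise per-namespace state hanging off 'net' */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* release the per-namespace state */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,	/* runs for init_net and every later namespace */
	.exit = foo_net_exit,	/* runs when a namespace is dismantled */
};

static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}
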
#ifdef CONFIG_AUDITSYSCALL
@@ -2393,27 +2950,23 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
- switch(sel->family) {
+ switch (sel->family) {
case AF_INET:
- audit_log_format(audit_buf, " src=" NIPQUAD_FMT,
- NIPQUAD(sel->saddr.a4));
+ audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
if (sel->prefixlen_s != 32)
audit_log_format(audit_buf, " src_prefixlen=%d",
sel->prefixlen_s);
- audit_log_format(audit_buf, " dst=" NIPQUAD_FMT,
- NIPQUAD(sel->daddr.a4));
+ audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
if (sel->prefixlen_d != 32)
audit_log_format(audit_buf, " dst_prefixlen=%d",
sel->prefixlen_d);
break;
case AF_INET6:
- audit_log_format(audit_buf, " src=" NIP6_FMT,
- NIP6(*(struct in6_addr *)sel->saddr.a6));
+ audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
if (sel->prefixlen_s != 128)
audit_log_format(audit_buf, " src_prefixlen=%d",
sel->prefixlen_s);
- audit_log_format(audit_buf, " dst=" NIP6_FMT,
- NIP6(*(struct in6_addr *)sel->daddr.a6));
+ audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
if (sel->prefixlen_d != 128)
audit_log_format(audit_buf, " dst_prefixlen=%d",
sel->prefixlen_d);
@@ -2421,15 +2974,14 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
}
}
-void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
- u32 auid, u32 secid)
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SPD-add");
if (audit_buf == NULL)
return;
- xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
+ xfrm_audit_helper_usrinfo(task_valid, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
xfrm_audit_common_policyinfo(xp, audit_buf);
audit_log_end(audit_buf);
@@ -2437,14 +2989,14 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
- u32 auid, u32 secid)
+ bool task_valid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SPD-delete");
if (audit_buf == NULL)
return;
- xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
+ xfrm_audit_helper_usrinfo(task_valid, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
xfrm_audit_common_policyinfo(xp, audit_buf);
audit_log_end(audit_buf);
@@ -2453,38 +3005,37 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
- struct xfrm_selector *sel_tgt)
+static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+ const struct xfrm_selector *sel_tgt)
{
if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
if (sel_tgt->family == sel_cmp->family &&
- xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
- sel_cmp->family) == 0 &&
- xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
- sel_cmp->family) == 0 &&
+ xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
+ sel_cmp->family) &&
+ xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
+ sel_cmp->family) &&
sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
- return 1;
+ return true;
}
} else {
if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
-static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
- u8 dir, u8 type)
+static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
+ u8 dir, u8 type, struct net *net)
{
struct xfrm_policy *pol, *ret = NULL;
- struct hlist_node *entry;
struct hlist_head *chain;
u32 priority = ~0U;
- read_lock_bh(&xfrm_policy_lock);
- chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
+ chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
+ hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
@@ -2492,8 +3043,8 @@ static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
break;
}
}
- chain = &xfrm_policy_inexact[dir];
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ chain = &net->xfrm.policy_inexact[dir];
+ hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type &&
pol->priority < priority) {
@@ -2505,12 +3056,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
if (ret)
xfrm_pol_hold(ret);
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
-static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
+static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
int match = 0;
@@ -2519,10 +3070,10 @@ static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
switch (t->mode) {
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
- if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
- m->old_family) == 0 &&
- xfrm_addr_cmp(&t->saddr, &m->old_saddr,
- m->old_family) == 0) {
+ if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
+ m->old_family) &&
+ xfrm_addr_equal(&t->saddr, &m->old_saddr,
+ m->old_family)) {
match = 1;
}
break;
@@ -2544,11 +3095,10 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
struct xfrm_migrate *m, int num_migrate)
{
struct xfrm_migrate *mp;
- struct dst_entry *dst;
int i, j, n = 0;
write_lock_bh(&pol->lock);
- if (unlikely(pol->dead)) {
+ if (unlikely(pol->walk.dead)) {
/* target policy has been deleted */
write_unlock_bh(&pol->lock);
return -ENOENT;
@@ -2569,10 +3119,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
sizeof(pol->xfrm_vec[i].saddr));
pol->xfrm_vec[i].encap_family = mp->new_family;
/* flush bundles */
- while ((dst = pol->bundles) != NULL) {
- pol->bundles = dst->next;
- dst_free(dst);
- }
+ atomic_inc(&pol->genid);
}
}
@@ -2584,7 +3131,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
return 0;
}
-static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
+static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
int i, j;
@@ -2592,10 +3139,10 @@ static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
return -EINVAL;
for (i = 0; i < num_migrate; i++) {
- if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
- m[i].old_family) == 0) &&
- (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
- m[i].old_family) == 0))
+ if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
+ m[i].old_family) &&
+ xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
+ m[i].old_family))
return -EINVAL;
if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
@@ -2618,8 +3165,9 @@ static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
return 0;
}
-int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_migrate)
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_migrate,
+ struct xfrm_kmaddress *k, struct net *net)
{
int i, err, nx_cur = 0, nx_new = 0;
struct xfrm_policy *pol = NULL;
@@ -2632,14 +3180,14 @@ int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
goto out;
/* Stage 1 - find policy */
- if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
+ if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
goto out;
}
/* Stage 2 - find and update state(s) */
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
- if ((x = xfrm_migrate_state_find(mp))) {
+ if ((x = xfrm_migrate_state_find(mp, net))) {
x_cur[nx_cur] = x;
nx_cur++;
if ((xc = xfrm_state_migrate(x, mp))) {
@@ -2663,7 +3211,7 @@ int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
}
/* Stage 5 - announce */
- km_migrate(sel, dir, type, m, num_migrate);
+ km_migrate(sel, dir, type, m, num_migrate, k);
xfrm_pol_put(pol);