author    Linus Torvalds <torvalds@linux-foundation.org>  2008-01-29 22:54:01 +1100
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-01-29 22:54:01 +1100
commit    0ba6c33bcddc64a54b5f1c25a696c4767dc76292
tree      62e616f97a4762d8e75bf732e4827af2d15d52c5 /net/ipv4/route.c
parent    21af0297c7e56024a5ccc4d8ad2a590f9ec371ba
parent    85040bcb4643cba578839e953f25e2d1965d83d0
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.25
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.25: (1470 commits)
  [IPV6] ADDRLABEL: Fix double free on label deletion.
  [PPP]: Sparse warning fixes.
  [IPV4] fib_trie: remove unneeded NULL check
  [IPV4] fib_trie: More whitespace cleanup.
  [NET_SCHED]: Use nla_policy for attribute validation in ematches
  [NET_SCHED]: Use nla_policy for attribute validation in actions
  [NET_SCHED]: Use nla_policy for attribute validation in classifiers
  [NET_SCHED]: Use nla_policy for attribute validation in packet schedulers
  [NET_SCHED]: sch_api: introduce constant for rate table size
  [NET_SCHED]: Use typeful attribute parsing helpers
  [NET_SCHED]: Use typeful attribute construction helpers
  [NET_SCHED]: Use NLA_PUT_STRING for string dumping
  [NET_SCHED]: Use nla_nest_start/nla_nest_end
  [NET_SCHED]: Propagate nla_parse return value
  [NET_SCHED]: act_api: use PTR_ERR in tcf_action_init/tcf_action_get
  [NET_SCHED]: act_api: use nlmsg_parse
  [NET_SCHED]: act_api: fix netlink API conversion bug
  [NET_SCHED]: sch_netem: use nla_parse_nested_compat
  [NET_SCHED]: sch_atm: fix format string warning
  [NETNS]: Add namespace for ICMP replying code.
  ...
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--  net/ipv4/route.c | 427
1 file changed, 253 insertions(+), 174 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 28484f396b0..896c768e41a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -92,6 +92,7 @@
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
+#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
@@ -132,13 +133,14 @@ static int ip_rt_mtu_expires = 10 * 60 * HZ;
static int ip_rt_min_pmtu = 512 + 20 + 20;
static int ip_rt_min_advmss = 256;
static int ip_rt_secret_interval = 10 * 60 * HZ;
+static int ip_rt_flush_expected;
static unsigned long rt_deadline;
#define RTprint(a...) printk(KERN_DEBUG a)
static struct timer_list rt_flush_timer;
-static void rt_check_expire(struct work_struct *work);
-static DECLARE_DELAYED_WORK(expires_work, rt_check_expire);
+static void rt_worker_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
static struct timer_list rt_secret_timer;
/*
@@ -152,7 +154,7 @@ static void ipv4_dst_ifdown(struct dst_entry *dst,
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
-static int rt_garbage_collect(void);
+static int rt_garbage_collect(struct dst_ops *ops);
static struct dst_ops ipv4_dst_ops = {
@@ -165,6 +167,7 @@ static struct dst_ops ipv4_dst_ops = {
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
+ .local_out = ip_local_out,
.entry_size = sizeof(struct rtable),
};
@@ -232,16 +235,25 @@ struct rt_hash_bucket {
static spinlock_t *rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
-# define rt_hash_lock_init() { \
- int i; \
- rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
- if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
- for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
- spin_lock_init(&rt_hash_locks[i]); \
- }
+
+static __init void rt_hash_lock_init(void)
+{
+ int i;
+
+ rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
+ GFP_KERNEL);
+ if (!rt_hash_locks)
+ panic("IP: failed to allocate rt_hash_locks\n");
+
+ for (i = 0; i < RT_HASH_LOCK_SZ; i++)
+ spin_lock_init(&rt_hash_locks[i]);
+}
#else
# define rt_hash_lock_addr(slot) NULL
-# define rt_hash_lock_init()
+
+static inline void rt_hash_lock_init(void)
+{
+}
#endif
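
Note: converting the statement macro into a real function (and, in the !SMP case, an
empty inline) keeps type checking in both configurations while the stub compiles away.
A minimal sketch of the same idiom, with hypothetical names:

#ifdef MY_FEATURE
static __init void my_feature_init(void)
{
        /* real initialization, only compiled when the feature is on */
}
#else
static inline void my_feature_init(void)
{
        /* no-op stub: callers need no #ifdef, and calls still type-check */
}
#endif
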
static struct rt_hash_bucket *rt_hash_table;
@@ -478,6 +490,83 @@ static const struct file_operations rt_cpu_seq_fops = {
.release = seq_release,
};
+#ifdef CONFIG_NET_CLS_ROUTE
+static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ unsigned int i;
+
+ if ((offset & 3) || (length & 3))
+ return -EIO;
+
+ if (offset >= sizeof(struct ip_rt_acct) * 256) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
+ length = sizeof(struct ip_rt_acct) * 256 - offset;
+ *eof = 1;
+ }
+
+ offset /= sizeof(u32);
+
+ if (length > 0) {
+ u32 *dst = (u32 *) buffer;
+
+ *start = buffer;
+ memset(dst, 0, length);
+
+ for_each_possible_cpu(i) {
+ unsigned int j;
+ u32 *src;
+
+ src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
+ for (j = 0; j < length/4; j++)
+ dst[j] += src[j];
+ }
+ }
+ return length;
+}
+#endif
+
+static __init int ip_rt_proc_init(struct net *net)
+{
+ struct proc_dir_entry *pde;
+
+ pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
+ &rt_cache_seq_fops);
+ if (!pde)
+ goto err1;
+
+ pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat);
+ if (!pde)
+ goto err2;
+
+ pde->proc_fops = &rt_cpu_seq_fops;
+
+#ifdef CONFIG_NET_CLS_ROUTE
+ pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
+ ip_rt_acct_read, NULL);
+ if (!pde)
+ goto err3;
+#endif
+ return 0;
+
+#ifdef CONFIG_NET_CLS_ROUTE
+err3:
+ remove_proc_entry("rt_cache", net->proc_net_stat);
+#endif
+err2:
+ remove_proc_entry("rt_cache", net->proc_net);
+err1:
+ return -ENOMEM;
+}
+#else
+static inline int ip_rt_proc_init(struct net *net)
+{
+ return 0;
+}
#endif /* CONFIG_PROC_FS */
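
Note: ip_rt_proc_init() above follows the kernel's usual unwind-on-error goto ladder,
where each failure jumps to a label that releases everything acquired so far, in
reverse order. A minimal sketch of the idiom, with hypothetical helpers:

static int my_init(void)
{
        if (!create_a())                /* hypothetical resource A */
                goto err_a;
        if (!create_b())                /* hypothetical resource B */
                goto err_b;
        return 0;

err_b:
        destroy_a();                    /* unwind in reverse order */
err_a:
        return -ENOMEM;
}
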
static __inline__ void rt_free(struct rtable *rt)
@@ -559,7 +648,41 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
(fl1->iif ^ fl2->iif)) == 0;
}
-static void rt_check_expire(struct work_struct *work)
+static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
+{
+ return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net;
+}
+
+/*
+ * Perform a full scan of the hash table and free all entries.
+ * Can be called from softirq or process context.
+ * In the latter case, we reschedule if necessary.
+ */
+static void rt_do_flush(int process_context)
+{
+ unsigned int i;
+ struct rtable *rth, *next;
+
+ for (i = 0; i <= rt_hash_mask; i++) {
+ if (process_context && need_resched())
+ cond_resched();
+ rth = rt_hash_table[i].chain;
+ if (!rth)
+ continue;
+
+ spin_lock_bh(rt_hash_lock_addr(i));
+ rth = rt_hash_table[i].chain;
+ rt_hash_table[i].chain = NULL;
+ spin_unlock_bh(rt_hash_lock_addr(i));
+
+ for (; rth; rth = next) {
+ next = rth->u.dst.rt_next;
+ rt_free(rth);
+ }
+ }
+}
+
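+
(Note: rt_do_flush() keeps lock hold times short by detaching each whole chain under
the bucket lock and freeing the entries after the lock is dropped; the flag only
gates cond_resched(). From the caller's side, as a sketch rather than patch code:

        rt_do_flush(0);         /* from BH/timer context: must not sleep */
        rt_do_flush(1);         /* from process context: may cond_resched() */
)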
+static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
@@ -605,33 +728,33 @@ static void rt_check_expire(struct work_struct *work)
spin_unlock_bh(rt_hash_lock_addr(i));
}
rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * If a full flush was scheduled, it is performed here.
+ * Otherwise, rt_check_expire() is called to scan part of the hash table.
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+ if (ip_rt_flush_expected) {
+ ip_rt_flush_expected = 0;
+ rt_do_flush(1);
+ } else
+ rt_check_expire();
schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
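
Note: the worker re-arms itself with schedule_delayed_work(), which is the standard
way to get a periodic job in process context. A self-contained sketch of the idiom,
with hypothetical names:

#include <linux/workqueue.h>

static void my_periodic(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_periodic_work, my_periodic);

static void my_periodic(struct work_struct *work)
{
        /* housekeeping that may sleep goes here */
        schedule_delayed_work(&my_periodic_work, HZ);   /* run again in ~1 s */
}

/* kicked off once, e.g. from an init function:
 *      schedule_delayed_work(&my_periodic_work, HZ);
 */
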
/* This can run from both BH and non-BH contexts, the latter
* in the case of a forced flush event.
*/
-static void rt_run_flush(unsigned long dummy)
+static void rt_run_flush(unsigned long process_context)
{
- int i;
- struct rtable *rth, *next;
-
rt_deadline = 0;
get_random_bytes(&rt_hash_rnd, 4);
- for (i = rt_hash_mask; i >= 0; i--) {
- spin_lock_bh(rt_hash_lock_addr(i));
- rth = rt_hash_table[i].chain;
- if (rth)
- rt_hash_table[i].chain = NULL;
- spin_unlock_bh(rt_hash_lock_addr(i));
-
- for (; rth; rth = next) {
- next = rth->u.dst.rt_next;
- rt_free(rth);
- }
- }
+ rt_do_flush(process_context);
}
static DEFINE_SPINLOCK(rt_flush_lock);
@@ -665,7 +788,7 @@ void rt_cache_flush(int delay)
if (delay <= 0) {
spin_unlock_bh(&rt_flush_lock);
- rt_run_flush(0);
+ rt_run_flush(user_mode);
return;
}
@@ -676,12 +799,17 @@ void rt_cache_flush(int delay)
spin_unlock_bh(&rt_flush_lock);
}
+/*
+ * We change rt_hash_rnd and ask the next rt_worker_func() invocation
+ * to perform a full flush in process context.
+ */
static void rt_secret_rebuild(unsigned long dummy)
{
- unsigned long now = jiffies;
-
- rt_cache_flush(0);
- mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+ get_random_bytes(&rt_hash_rnd, 4);
+ ip_rt_flush_expected = 1;
+ cancel_delayed_work(&expires_work);
+ schedule_delayed_work(&expires_work, HZ/10);
+ mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
}
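
Note: rt_secret_rebuild() runs from a timer, i.e. in softirq context, where the full
flush (which may cond_resched()) is not allowed; it therefore only sets
ip_rt_flush_expected and pulls the worker forward. The general shape of that
hand-off, as a sketch with hypothetical names (my_work is a delayed_work like the
sketch after rt_worker_func() above):

static int my_flush_wanted;             /* read by the worker */
static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
        my_flush_wanted = 1;                      /* record the request only */
        cancel_delayed_work(&my_work);            /* drop the pending long delay */
        schedule_delayed_work(&my_work, HZ / 10); /* worker runs soon, may sleep */
        mod_timer(&my_timer, jiffies + 10 * 60 * HZ);   /* re-arm the timer */
}
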
/*
@@ -697,7 +825,7 @@ static void rt_secret_rebuild(unsigned long dummy)
and when load increases it reduces to limit cache size.
*/
-static int rt_garbage_collect(void)
+static int rt_garbage_collect(struct dst_ops *ops)
{
static unsigned long expire = RT_GC_TIMEOUT;
static unsigned long last_gc;
@@ -728,14 +856,14 @@ static int rt_garbage_collect(void)
equilibrium = ipv4_dst_ops.gc_thresh;
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
if (goal > 0) {
- equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
+ equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
}
} else {
/* We are in dangerous area. Try to reduce cache really
* aggressively.
*/
- goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
+ goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
}
@@ -838,7 +966,7 @@ restart:
spin_lock_bh(rt_hash_lock_addr(hash));
while ((rth = *rthp) != NULL) {
- if (compare_keys(&rth->fl, &rt->fl)) {
+ if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
/* Put it first */
*rthp = rth->u.dst.rt_next;
/*
@@ -912,7 +1040,7 @@ restart:
int saved_int = ip_rt_gc_min_interval;
ip_rt_gc_elasticity = 1;
ip_rt_gc_min_interval = 0;
- rt_garbage_collect();
+ rt_garbage_collect(&ipv4_dst_ops);
ip_rt_gc_min_interval = saved_int;
ip_rt_gc_elasticity = saved_elasticity;
goto restart;
@@ -1031,7 +1159,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
return;
if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
- || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
+ || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
+ || ipv4_is_zeronet(new_gw))
goto reject_redirect;
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
@@ -1040,7 +1169,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
goto reject_redirect;
} else {
- if (inet_addr_type(new_gw) != RTN_UNICAST)
+ if (inet_addr_type(&init_net, new_gw) != RTN_UNICAST)
goto reject_redirect;
}
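
Note: throughout this patch the old all-caps macros (MULTICAST(), BADCLASS(),
ZERONET(), LOOPBACK(), LOCAL_MCAST()) are replaced by the typed ipv4_is_*() helpers
from <linux/in.h>. Their checks are roughly the following sketch; the canonical
definitions live in the header:

static inline bool is_multicast(__be32 addr)    /* 224.0.0.0/4 */
{
        return (addr & htonl(0xf0000000)) == htonl(0xe0000000);
}

static inline bool is_zeronet(__be32 addr)      /* 0.0.0.0/8 */
{
        return (addr & htonl(0xff000000)) == htonl(0x00000000);
}

static inline bool is_loopback(__be32 addr)     /* 127.0.0.0/8 */
{
        return (addr & htonl(0xff000000)) == htonl(0x7f000000);
}

static inline bool is_lbcast(__be32 addr)       /* 255.255.255.255 */
{
        return addr == htonl(0xffffffff);
}
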
@@ -1291,7 +1420,8 @@ static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
return 68;
}
-unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
+unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
+ unsigned short new_mtu)
{
int i;
unsigned short old_mtu = ntohs(iph->tot_len);
@@ -1314,7 +1444,8 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
rth->rt_dst == daddr &&
rth->rt_src == iph->saddr &&
rth->fl.iif == 0 &&
- !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
+ !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
+ rth->u.dst.dev->nd_net == net) {
unsigned short mtu = new_mtu;
if (new_mtu < 68 || new_mtu >= old_mtu) {
@@ -1389,8 +1520,9 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
{
struct rtable *rt = (struct rtable *) dst;
struct in_device *idev = rt->idev;
- if (dev != init_net.loopback_dev && idev && idev->dev == dev) {
- struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev);
+ if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) {
+ struct in_device *loopback_idev =
+ in_dev_get(dev->nd_net->loopback_dev);
if (loopback_idev) {
rt->idev = loopback_idev;
in_dev_put(idev);
@@ -1434,7 +1566,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
if (rt->fl.iif == 0)
src = rt->rt_src;
- else if (fib_lookup(&rt->fl, &res) == 0) {
+ else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) {
src = FIB_RES_PREFSRC(res);
fib_res_put(&res);
} else
@@ -1509,12 +1641,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (in_dev == NULL)
return -EINVAL;
- if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
- skb->protocol != htons(ETH_P_IP))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
+ ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
goto e_inval;
- if (ZERONET(saddr)) {
- if (!LOCAL_MCAST(daddr))
+ if (ipv4_is_zeronet(saddr)) {
+ if (!ipv4_is_local_multicast(daddr))
goto e_inval;
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
} else if (fib_validate_source(saddr, 0, tos, 0,
@@ -1556,7 +1688,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
}
#ifdef CONFIG_IP_MROUTE
- if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
+ if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
rth->u.dst.input = ip_mr_input;
#endif
RT_CACHE_STAT_INC(in_slow_mc);
@@ -1643,7 +1775,7 @@ static inline int __mkroute_input(struct sk_buff *skb,
if (err)
flags |= RTCF_DIRECTSRC;
- if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
+ if (out_dev == in_dev && err && !(flags & RTCF_MASQ) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
flags |= RTCF_DOREDIRECT;
@@ -1652,7 +1784,7 @@ static inline int __mkroute_input(struct sk_buff *skb,
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
*/
- if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
+ if (out_dev == in_dev) {
err = -EINVAL;
goto cleanup;
}
@@ -1756,6 +1888,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
__be32 spec_dst;
int err = -EINVAL;
int free_res = 0;
+ struct net *net = dev->nd_net;
/* IP on this device is disabled. */
@@ -1766,7 +1899,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
by fib_lookup.
*/
- if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
+ ipv4_is_loopback(saddr))
goto martian_source;
if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
@@ -1775,16 +1909,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
/* Accept zero addresses only to limited broadcast;
* I do not even know whether to fix it or not. Waiting for complaints :-)
*/
- if (ZERONET(saddr))
+ if (ipv4_is_zeronet(saddr))
goto martian_source;
- if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
+ if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
+ ipv4_is_loopback(daddr))
goto martian_destination;
/*
* Now we are ready to route packet.
*/
- if ((err = fib_lookup(&fl, &res)) != 0) {
+ if ((err = fib_lookup(net, &fl, &res)) != 0) {
if (!IN_DEV_FORWARD(in_dev))
goto e_hostunreach;
goto no_route;
@@ -1799,7 +1934,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res.type == RTN_LOCAL) {
int result;
result = fib_validate_source(saddr, daddr, tos,
- init_net.loopback_dev->ifindex,
+ net->loopback_dev->ifindex,
dev, &spec_dst, &itag);
if (result < 0)
goto martian_source;
@@ -1825,7 +1960,7 @@ brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
- if (ZERONET(saddr))
+ if (ipv4_is_zeronet(saddr))
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
else {
err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
@@ -1861,7 +1996,7 @@ local_input:
#endif
rth->rt_iif =
rth->fl.iif = dev->ifindex;
- rth->u.dst.dev = init_net.loopback_dev;
+ rth->u.dst.dev = net->loopback_dev;
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->rt_gateway = daddr;
@@ -1921,7 +2056,9 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct rtable * rth;
unsigned hash;
int iif = dev->ifindex;
+ struct net *net;
+ net = skb->dev->nd_net;
tos &= IPTOS_RT_MASK;
hash = rt_hash(daddr, saddr, iif);
@@ -1933,7 +2070,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->fl.iif == iif &&
rth->fl.oif == 0 &&
rth->fl.mark == skb->mark &&
- rth->fl.fl4_tos == tos) {
+ rth->fl.fl4_tos == tos &&
+ rth->u.dst.dev->nd_net == net) {
dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(in_hit);
rcu_read_unlock();
@@ -1955,7 +2093,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
Note, that multicast routers are not affected, because
route cache entry is created eventually.
*/
- if (MULTICAST(daddr)) {
+ if (ipv4_is_multicast(daddr)) {
struct in_device *in_dev;
rcu_read_lock();
@@ -1964,7 +2102,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
ip_hdr(skb)->protocol);
if (our
#ifdef CONFIG_IP_MROUTE
- || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
+ || (!ipv4_is_local_multicast(daddr) &&
+ IN_DEV_MFORWARD(in_dev))
#endif
) {
rcu_read_unlock();
@@ -1990,14 +2129,14 @@ static inline int __mkroute_output(struct rtable **result,
u32 tos = RT_FL_TOS(oldflp);
int err = 0;
- if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
+ if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
return -EINVAL;
if (fl->fl4_dst == htonl(0xFFFFFFFF))
res->type = RTN_BROADCAST;
- else if (MULTICAST(fl->fl4_dst))
+ else if (ipv4_is_multicast(fl->fl4_dst))
res->type = RTN_MULTICAST;
- else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
+ else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
return -EINVAL;
if (dev_out->flags & IFF_LOOPBACK)
@@ -2077,7 +2216,7 @@ static inline int __mkroute_output(struct rtable **result,
#ifdef CONFIG_IP_MROUTE
if (res->type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
- !LOCAL_MCAST(oldflp->fl4_dst)) {
+ !ipv4_is_local_multicast(oldflp->fl4_dst)) {
rth->u.dst.input = ip_mr_input;
rth->u.dst.output = ip_mc_output;
}
@@ -2119,7 +2258,8 @@ static inline int ip_mkroute_output(struct rtable **rp,
* Major route resolver routine.
*/
-static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
+static int ip_route_output_slow(struct net *net, struct rtable **rp,
+ const struct flowi *oldflp)
{
u32 tos = RT_FL_TOS(oldflp);
struct flowi fl = { .nl_u = { .ip4_u =
@@ -2131,7 +2271,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
RT_SCOPE_UNIVERSE),
} },
.mark = oldflp->mark,
- .iif = init_net.loopback_dev->ifindex,
+ .iif = net->loopback_dev->ifindex,
.oif = oldflp->oif };
struct fib_result res;
unsigned flags = 0;
@@ -2147,26 +2287,27 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
if (oldflp->fl4_src) {
err = -EINVAL;
- if (MULTICAST(oldflp->fl4_src) ||
- BADCLASS(oldflp->fl4_src) ||
- ZERONET(oldflp->fl4_src))
+ if (ipv4_is_multicast(oldflp->fl4_src) ||
+ ipv4_is_lbcast(oldflp->fl4_src) ||
+ ipv4_is_zeronet(oldflp->fl4_src))
goto out;
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(oldflp->fl4_src);
+ dev_out = ip_dev_find(net, oldflp->fl4_src);
if (dev_out == NULL)
goto out;
/* I removed check for oif == dev_out->oif here.
It was wrong for two reasons:
- 1. ip_dev_find(saddr) can return wrong iface, if saddr is
- assigned to multiple interfaces.
+ 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
+ is assigned to multiple interfaces.
2. Moreover, we are allowed to send packets with saddr
of another iface. --ANK
*/
if (oldflp->oif == 0
- && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
+ && (ipv4_is_multicast(oldflp->fl4_dst) ||
+ oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
/* Special hack: user can direct multicasts
and limited broadcast via necessary interface
without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
@@ -2192,7 +2333,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
if (oldflp->oif) {
- dev_out = dev_get_by_index(&init_net, oldflp->oif);
+ dev_out = dev_get_by_index(net, oldflp->oif);
err = -ENODEV;
if (dev_out == NULL)
goto out;
@@ -2203,14 +2344,15 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
goto out; /* Wrong error code */
}
- if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
+ if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
+ oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
if (!fl.fl4_src)
fl.fl4_src = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
goto make_route;
}
if (!fl.fl4_src) {
- if (MULTICAST(oldflp->fl4_dst))
+ if (ipv4_is_multicast(oldflp->fl4_dst))
fl.fl4_src = inet_select_addr(dev_out, 0,
fl.fl4_scope);
else if (!oldflp->fl4_dst)
@@ -2225,15 +2367,15 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
if (dev_out)
dev_put(dev_out);
- dev_out = init_net.loopback_dev;
+ dev_out = net->loopback_dev;
dev_hold(dev_out);
- fl.oif = init_net.loopback_dev->ifindex;
+ fl.oif = net->loopback_dev->ifindex;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
- if (fib_lookup(&fl, &res)) {
+ if (fib_lookup(net, &fl, &res)) {
res.fi = NULL;
if (oldflp->oif) {
/* Apparently, routing tables are wrong. Assume,
@@ -2272,7 +2414,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
fl.fl4_src = fl.fl4_dst;
if (dev_out)
dev_put(dev_out);
- dev_out = init_net.loopback_dev;
+ dev_out = net->loopback_dev;
dev_hold(dev_out);
fl.oif = dev_out->ifindex;
if (res.fi)
@@ -2288,7 +2430,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
else
#endif
if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(&fl, &res);
+ fib_select_default(net, &fl, &res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2311,7 +2453,8 @@ make_route:
out: return err;
}
-int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
+int __ip_route_output_key(struct net *net, struct rtable **rp,
+ const struct flowi *flp)
{
unsigned hash;
struct rtable *rth;
@@ -2327,7 +2470,8 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
rth->fl.oif == flp->oif &&
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
- (IPTOS_RT_MASK | RTO_ONLINK))) {
+ (IPTOS_RT_MASK | RTO_ONLINK)) &&
+ rth->u.dst.dev->nd_net == net) {
dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
@@ -2338,7 +2482,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
}
rcu_read_unlock_bh();
- return ip_route_output_slow(rp, flp);
+ return ip_route_output_slow(net, rp, flp);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
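
Note: with the struct net parameter added, a caller now names the namespace
explicitly. A minimal caller sketch against the new signature (illustrative only;
daddr is a hypothetical destination):

__be32 daddr = htonl(0x0a000001);       /* 10.0.0.1, for example */
struct flowi fl = {
        .nl_u = { .ip4_u = { .daddr = daddr } },
};
struct rtable *rt;
int err;

err = __ip_route_output_key(&init_net, &rt, &fl);
if (err == 0) {
        /* ... use the route ... */
        ip_rt_put(rt);          /* drop the cache reference when done */
}
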
@@ -2357,12 +2501,6 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
};
-static int ipv4_blackhole_output(struct sk_buff *skb)
-{
- kfree_skb(skb);
- return 0;
-}
-
static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
{
struct rtable *ort = *rp;
@@ -2374,8 +2512,8 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock
atomic_set(&new->__refcnt, 1);
new->__use = 1;
- new->input = ipv4_blackhole_output;
- new->output = ipv4_blackhole_output;
+ new->input = dst_discard;
+ new->output = dst_discard;
memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
new->dev = ort->u.dst.dev;
@@ -2406,11 +2544,12 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock
return (rt ? 0 : -ENOMEM);
}
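
Note: the private ipv4_blackhole_output() can be dropped because the generic
dst_discard() (declared in <net/dst.h>, hence the new include at the top of this
patch) behaves the same way; roughly:

/* what dst_discard() amounts to (see net/core/dst.c) */
static int discard(struct sk_buff *skb)
{
        kfree_skb(skb);         /* black hole: free the packet, report success */
        return 0;
}
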
-int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
+int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
+ struct sock *sk, int flags)
{
int err;
- if ((err = __ip_route_output_key(rp, flp)) != 0)
+ if ((err = __ip_route_output_key(net, rp, flp)) != 0)
return err;
if (flp->proto) {
@@ -2418,7 +2557,8 @@ int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk,
flp->fl4_src = (*rp)->rt_src;
if (!flp->fl4_dst)
flp->fl4_dst = (*rp)->rt_dst;
- err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
+ err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
+ flags ? XFRM_LOOKUP_WAIT : 0);
if (err == -EREMOTE)
err = ipv4_dst_blackhole(rp, flp, sk);
@@ -2430,9 +2570,9 @@ int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk,
EXPORT_SYMBOL_GPL(ip_route_output_flow);
-int ip_route_output_key(struct rtable **rp, struct flowi *flp)
+int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
- return ip_route_output_flow(rp, flp, NULL, 0);
+ return ip_route_output_flow(net, rp, flp, NULL, 0);
}
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
@@ -2499,8 +2639,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
#ifdef CONFIG_IP_MROUTE
__be32 dst = rt->rt_dst;
- if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
- IPV4_DEVCONF_ALL(MC_FORWARDING)) {
+ if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
+ IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
int err = ipmr_get_route(skb, r, nowait);
if (err <= 0) {
if (!nowait) {
@@ -2531,6 +2671,7 @@ nla_put_failure:
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
+ struct net *net = in_skb->sk->sk_net;
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
@@ -2540,6 +2681,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
int err;
struct sk_buff *skb;
+ if (net != &init_net)
+ return -EINVAL;
+
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
if (err < 0)
goto errout;
@@ -2595,7 +2739,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
},
.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
};
- err = ip_route_output_key(&rt, &fl);
+ err = ip_route_output_key(&init_net, &rt, &fl);
}
if (err)
@@ -2610,7 +2754,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
if (err <= 0)
goto errout_free;
- err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
+ err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
errout:
return err;
@@ -2862,51 +3006,7 @@ ctl_table ipv4_route_table[] = {
#endif
#ifdef CONFIG_NET_CLS_ROUTE
-struct ip_rt_acct *ip_rt_acct;
-
-/* This code sucks. But you should have seen it before! --RR */
-
-/* IP route accounting ptr for this logical cpu number. */
-#define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)
-
-#ifdef CONFIG_PROC_FS
-static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data)
-{
- unsigned int i;
-
- if ((offset & 3) || (length & 3))
- return -EIO;
-
- if (offset >= sizeof(struct ip_rt_acct) * 256) {
- *eof = 1;
- return 0;
- }
-
- if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
- length = sizeof(struct ip_rt_acct) * 256 - offset;
- *eof = 1;
- }
-
- offset /= sizeof(u32);
-
- if (length > 0) {
- u32 *dst = (u32 *) buffer;
-
- *start = buffer;
- memset(dst, 0, length);
-
- for_each_possible_cpu(i) {
- unsigned int j;
- u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
-
- for (j = 0; j < length/4; j++)
- dst[j] += src[j];
- }
- }
- return length;
-}
-#endif /* CONFIG_PROC_FS */
+struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */
static __initdata unsigned long rhash_entries;
@@ -2927,16 +3027,9 @@ int __init ip_rt_init(void)
(jiffies ^ (jiffies >> 7)));
#ifdef CONFIG_NET_CLS_ROUTE
- {
- int order;
- for (order = 0;
- (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
- /* NOTHING */;
- ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
+ ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
- memset(ip_rt_acct, 0, PAGE_SIZE << order);
- }
#endif
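
Note: the open-coded __get_free_pages() array indexed by NR_CPUS becomes a real
per-CPU allocation, which is what lets ip_rt_acct_read() above fold the counters with
per_cpu_ptr(). The general pattern, as a sketch with a hypothetical counter:

struct my_stat {
        u32 hits;
};
static struct my_stat *my_stats;

static int __init my_stats_init(void)
{
        my_stats = __alloc_percpu(sizeof(struct my_stat)); /* zeroed, per CPU */
        return my_stats ? 0 : -ENOMEM;
}

static void my_stats_hit(void)
{
        per_cpu_ptr(my_stats, get_cpu())->hits++;  /* lock-free, this CPU only */
        put_cpu();
}

static u32 my_stats_total(void)
{
        u32 total = 0;
        int cpu;

        for_each_possible_cpu(cpu)      /* fold every CPU's copy for reporting */
                total += per_cpu_ptr(my_stats, cpu)->hits;
        return total;
}
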
ipv4_dst_ops.kmem_cachep =
@@ -2964,10 +3057,8 @@ int __init ip_rt_init(void)
devinet_init();
ip_fib_init();
- init_timer(&rt_flush_timer);
- rt_flush_timer.function = rt_run_flush;
- init_timer(&rt_secret_timer);
- rt_secret_timer.function = rt_secret_rebuild;
+ setup_timer(&rt_flush_timer, rt_run_flush, 0);
+ setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);
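
Note: setup_timer() is simply shorthand for the open-coded three-step initialization
it replaces here; for the second timer above it expands to roughly:

init_timer(&rt_secret_timer);
rt_secret_timer.function = rt_secret_rebuild;
rt_secret_timer.data = 0;
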
/* All the timers, started at system startup tend
to synchronize. Perturb it a bit.
@@ -2979,20 +3070,8 @@ int __init ip_rt_init(void)
ip_rt_secret_interval;
add_timer(&rt_secret_timer);
-#ifdef CONFIG_PROC_FS
- {
- struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
- if (!proc_net_fops_create(&init_net, "rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
- !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
- init_net.proc_net_stat))) {
- return -ENOMEM;
- }
- rtstat_pde->proc_fops = &rt_cpu_seq_fops;
- }
-#ifdef CONFIG_NET_CLS_ROUTE
- create_proc_read_entry("rt_acct", 0, init_net.proc_net, ip_rt_acct_read, NULL);
-#endif
-#endif
+ if (ip_rt_proc_init(&init_net))
+ printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
xfrm4_init();