Diffstat (limited to 'net/ipv4/ipmr.c')
| -rw-r--r-- | net/ipv4/ipmr.c | 1738 |
1 file changed, 1225 insertions, 513 deletions
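This diff converts IPv4 multicast routing from a single per-namespace state block into per-table struct mr_table instances: table selection goes through RTNL_FAMILY_IPMR fib rules (recent iproute2 exposes these as "ip mrule"), resolved-cache lookups move under RCU, (*,G)/(*,*) proxy entries are introduced, and MFC changes are announced via RTM_NEWROUTE/RTM_DELROUTE netlink events. A routing daemon opts into a non-default table with the new MRT_TABLE socket option handled further down. A minimal userspace sketch, assuming a kernel with CONFIG_IP_MROUTE_MULTIPLE_TABLES, CAP_NET_ADMIN, and a linux/mroute.h that defines MRT_TABLE (table id 100 is an arbitrary example):

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

int main(void)
{
	/* mrouted-style control socket; ip_mroute_setsockopt() below
	 * rejects anything but SOCK_RAW/IPPROTO_IGMP with -EOPNOTSUPP. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	uint32_t table = 100;	/* any id below 1000000000, so "pimreg%u" fits IFNAMSIZ */
	int on = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Select the table before MRT_INIT: once the socket owns a
	 * table, the MRT_TABLE case below returns -EBUSY. */
	if (setsockopt(fd, IPPROTO_IP, MRT_TABLE, &table, sizeof(table)) < 0)
		perror("MRT_TABLE");
	if (setsockopt(fd, IPPROTO_IP, MRT_INIT, &on, sizeof(on)) < 0)
		perror("MRT_INIT");
	/* ... subsequent MRT_ADD_VIF / MRT_ADD_MFC calls operate on table 100 ... */
	return 0;
}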
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 54596f73eff..65bcaa78904 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -22,11 +22,10 @@ * overflow. * Carlos Picoto : PIMv1 Support * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header - * Relax this requrement to work with older peers. + * Relax this requirement to work with older peers. * */ -#include <asm/system.h> #include <asm/uaccess.h> #include <linux/types.h> #include <linux/capability.h> @@ -47,6 +46,7 @@ #include <linux/mroute.h> #include <linux/init.h> #include <linux/if_ether.h> +#include <linux/slab.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> @@ -59,16 +59,48 @@ #include <linux/notifier.h> #include <linux/if_arp.h> #include <linux/netfilter_ipv4.h> -#include <net/ipip.h> +#include <linux/compat.h> +#include <linux/export.h> +#include <net/ip_tunnels.h> #include <net/checksum.h> #include <net/netlink.h> +#include <net/fib_rules.h> +#include <linux/netconf.h> #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) #define CONFIG_IP_PIMSM 1 #endif +struct mr_table { + struct list_head list; +#ifdef CONFIG_NET_NS + struct net *net; +#endif + u32 id; + struct sock __rcu *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct list_head mfc_cache_array[MFC_LINES]; + struct vif_device vif_table[MAXVIFS]; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; +#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) + int mroute_reg_vif_num; +#endif +}; + +struct ipmr_rule { + struct fib_rule common; +}; + +struct ipmr_result { + struct mr_table *mrt; +}; + /* Big lock, protecting vif table, mrt cache and mroute socket state. - Note that the changes are semaphored via rtnl_lock. + * Note that the changes are semaphored via rtnl_lock. */ static DEFINE_RWLOCK(mrt_lock); @@ -77,29 +109,248 @@ static DEFINE_RWLOCK(mrt_lock); * Multicast router control variables */ -#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) - -static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ +#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); /* We return to original Alan's scheme. Hash table of resolved - entries is changed only in process context and protected - with weak lock mrt_lock. Queue of unresolved entries is protected - with strong spinlock mfc_unres_lock. - - In this case data path is free of exclusive locks at all. + * entries is changed only in process context and protected + * with weak lock mrt_lock. Queue of unresolved entries is protected + * with strong spinlock mfc_unres_lock. + * + * In this case data path is free of exclusive locks at all. 
*/ static struct kmem_cache *mrt_cachep __read_mostly; -static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); -static int ipmr_cache_report(struct net *net, +static struct mr_table *ipmr_new_table(struct net *net, u32 id); +static void ipmr_free_table(struct mr_table *mrt); + +static void ip_mr_forward(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *cache, + int local); +static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); -static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); +static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mfc_cache *c, struct rtmsg *rtm); +static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, + int cmd); +static void mroute_clean_tables(struct mr_table *mrt); +static void ipmr_expire_process(unsigned long arg); + +#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES +#define ipmr_for_each_table(mrt, net) \ + list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) + +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + ipmr_for_each_table(mrt, net) { + if (mrt->id == id) + return mrt; + } + return NULL; +} + +static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, + struct mr_table **mrt) +{ + int err; + struct ipmr_result res; + struct fib_lookup_arg arg = { + .result = &res, + .flags = FIB_LOOKUP_NOREF, + }; + + err = fib_rules_lookup(net->ipv4.mr_rules_ops, + flowi4_to_flowi(flp4), 0, &arg); + if (err < 0) + return err; + *mrt = res.mrt; + return 0; +} + +static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, + int flags, struct fib_lookup_arg *arg) +{ + struct ipmr_result *res = arg->result; + struct mr_table *mrt; + + switch (rule->action) { + case FR_ACT_TO_TBL: + break; + case FR_ACT_UNREACHABLE: + return -ENETUNREACH; + case FR_ACT_PROHIBIT: + return -EACCES; + case FR_ACT_BLACKHOLE: + default: + return -EINVAL; + } + + mrt = ipmr_get_table(rule->fr_net, rule->table); + if (mrt == NULL) + return -EAGAIN; + res->mrt = mrt; + return 0; +} + +static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) +{ + return 1; +} + +static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { + FRA_GENERIC_POLICY, +}; + +static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr *frh, struct nlattr **tb) +{ + return 0; +} + +static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, + struct nlattr **tb) +{ + return 1; +} + +static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr *frh) +{ + frh->dst_len = 0; + frh->src_len = 0; + frh->tos = 0; + return 0; +} + +static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { + .family = RTNL_FAMILY_IPMR, + .rule_size = sizeof(struct ipmr_rule), + .addr_size = sizeof(u32), + .action = ipmr_rule_action, + .match = ipmr_rule_match, + .configure = ipmr_rule_configure, + .compare = ipmr_rule_compare, + .default_pref = fib_default_rule_pref, + .fill = ipmr_rule_fill, + .nlgroup = RTNLGRP_IPV4_RULE, + .policy = ipmr_rule_policy, + .owner = THIS_MODULE, +}; + +static int __net_init ipmr_rules_init(struct net *net) +{ + struct fib_rules_ops *ops; + struct mr_table *mrt; + int err; + + ops = fib_rules_register(&ipmr_rules_ops_template, net); + if (IS_ERR(ops)) + return PTR_ERR(ops); + + INIT_LIST_HEAD(&net->ipv4.mr_tables); + + mrt = ipmr_new_table(net, 
RT_TABLE_DEFAULT); + if (mrt == NULL) { + err = -ENOMEM; + goto err1; + } -static struct timer_list ipmr_expire_timer; + err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); + if (err < 0) + goto err2; + + net->ipv4.mr_rules_ops = ops; + return 0; + +err2: + kfree(mrt); +err1: + fib_rules_unregister(ops); + return err; +} + +static void __net_exit ipmr_rules_exit(struct net *net) +{ + struct mr_table *mrt, *next; + + list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { + list_del(&mrt->list); + ipmr_free_table(mrt); + } + fib_rules_unregister(net->ipv4.mr_rules_ops); +} +#else +#define ipmr_for_each_table(mrt, net) \ + for (mrt = net->ipv4.mrt; mrt; mrt = NULL) + +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + return net->ipv4.mrt; +} + +static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, + struct mr_table **mrt) +{ + *mrt = net->ipv4.mrt; + return 0; +} + +static int __net_init ipmr_rules_init(struct net *net) +{ + net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); + return net->ipv4.mrt ? 0 : -ENOMEM; +} + +static void __net_exit ipmr_rules_exit(struct net *net) +{ + ipmr_free_table(net->ipv4.mrt); +} +#endif + +static struct mr_table *ipmr_new_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + unsigned int i; + + mrt = ipmr_get_table(net, id); + if (mrt != NULL) + return mrt; + + mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); + if (mrt == NULL) + return NULL; + write_pnet(&mrt->net, net); + mrt->id = id; + + /* Forwarding cache */ + for (i = 0; i < MFC_LINES; i++) + INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); + + INIT_LIST_HEAD(&mrt->mfc_unres_queue); + + setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, + (unsigned long)mrt); + +#ifdef CONFIG_IP_PIMSM + mrt->mroute_reg_vif_num = -1; +#endif +#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES + list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); +#endif + return mrt; +} + +static void ipmr_free_table(struct mr_table *mrt) +{ + del_timer_sync(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); +} /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ @@ -163,9 +414,9 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) set_fs(KERNEL_DS); err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL); set_fs(oldfs); - } else + } else { err = -EOPNOTSUPP; - + } dev = NULL; if (err == 0 && @@ -177,6 +428,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) goto failure; ipv4_devconf_setall(in_dev); + neigh_parms_data_state_setall(in_dev->arp_parms); IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; if (dev_open(dev)) @@ -200,12 +452,24 @@ failure: static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); + struct mr_table *mrt; + struct flowi4 fl4 = { + .flowi4_oif = dev->ifindex, + .flowi4_iif = skb->skb_iif ? 
: LOOPBACK_IFINDEX, + .flowi4_mark = skb->mark, + }; + int err; + + err = ipmr_fib_lookup(net, &fl4, &mrt); + if (err < 0) { + kfree_skb(skb); + return err; + } read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; - ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, - IGMPMSG_WHOLEPKT); + ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; @@ -220,17 +484,23 @@ static void reg_vif_setup(struct net_device *dev) dev->type = ARPHRD_PIMREG; dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; - dev->netdev_ops = ®_vif_netdev_ops, + dev->netdev_ops = ®_vif_netdev_ops; dev->destructor = free_netdev; dev->features |= NETIF_F_NETNS_LOCAL; } -static struct net_device *ipmr_reg_vif(struct net *net) +static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) { struct net_device *dev; struct in_device *in_dev; + char name[IFNAMSIZ]; + + if (mrt->id == RT_TABLE_DEFAULT) + sprintf(name, "pimreg"); + else + sprintf(name, "pimreg%u", mrt->id); - dev = alloc_netdev(0, "pimreg", reg_vif_setup); + dev = alloc_netdev(0, name, reg_vif_setup); if (dev == NULL) return NULL; @@ -244,12 +514,14 @@ static struct net_device *ipmr_reg_vif(struct net *net) dev->iflink = 0; rcu_read_lock(); - if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { + in_dev = __in_dev_get_rcu(dev); + if (!in_dev) { rcu_read_unlock(); goto failure; } ipv4_devconf_setall(in_dev); + neigh_parms_data_state_setall(in_dev->arp_parms); IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; rcu_read_unlock(); @@ -270,22 +542,22 @@ failure: } #endif -/* - * Delete a VIF entry +/** + * vif_delete - Delete a VIF entry * @notify: Set to 1, if the caller is a notifier_call */ -static int vif_delete(struct net *net, int vifi, int notify, +static int vif_delete(struct mr_table *mrt, int vifi, int notify, struct list_head *head) { struct vif_device *v; struct net_device *dev; struct in_device *in_dev; - if (vifi < 0 || vifi >= net->ipv4.maxvif) + if (vifi < 0 || vifi >= mrt->maxvif) return -EADDRNOTAVAIL; - v = &net->ipv4.vif_table[vifi]; + v = &mrt->vif_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; @@ -297,108 +569,119 @@ static int vif_delete(struct net *net, int vifi, int notify, } #ifdef CONFIG_IP_PIMSM - if (vifi == net->ipv4.mroute_reg_vif_num) - net->ipv4.mroute_reg_vif_num = -1; + if (vifi == mrt->mroute_reg_vif_num) + mrt->mroute_reg_vif_num = -1; #endif - if (vifi+1 == net->ipv4.maxvif) { + if (vifi + 1 == mrt->maxvif) { int tmp; - for (tmp=vifi-1; tmp>=0; tmp--) { - if (VIF_EXISTS(net, tmp)) + + for (tmp = vifi - 1; tmp >= 0; tmp--) { + if (VIF_EXISTS(mrt, tmp)) break; } - net->ipv4.maxvif = tmp+1; + mrt->maxvif = tmp+1; } write_unlock_bh(&mrt_lock); dev_set_allmulti(dev, -1); - if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { + in_dev = __in_dev_get_rtnl(dev); + if (in_dev) { IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; + inet_netconf_notify_devconf(dev_net(dev), + NETCONFA_MC_FORWARDING, + dev->ifindex, &in_dev->cnf); ip_rt_multicast_event(in_dev); } - if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify) + if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); dev_put(dev); return 0; } -static inline void ipmr_cache_free(struct mfc_cache *c) +static void ipmr_cache_free_rcu(struct rcu_head *head) { - release_net(mfc_net(c)); + struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); + kmem_cache_free(mrt_cachep, c); } +static inline void 
ipmr_cache_free(struct mfc_cache *c) +{ + call_rcu(&c->rcu, ipmr_cache_free_rcu); +} + /* Destroy an unresolved cache entry, killing queued skbs - and reporting error to netlink readers. + * and reporting error to netlink readers. */ -static void ipmr_destroy_unres(struct mfc_cache *c) +static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) { + struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; struct nlmsgerr *e; - struct net *net = mfc_net(c); - atomic_dec(&net->ipv4.cache_resolve_queue_len); + atomic_dec(&mrt->cache_resolve_queue_len); while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; - nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); + nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); - e = NLMSG_DATA(nlh); + e = nlmsg_data(nlh); e->error = -ETIMEDOUT; memset(&e->msg, 0, sizeof(e->msg)); - rtnl_unicast(skb, net, NETLINK_CB(skb).pid); - } else + rtnl_unicast(skb, net, NETLINK_CB(skb).portid); + } else { kfree_skb(skb); + } } ipmr_cache_free(c); } -/* Single timer process for all the unresolved queue. */ +/* Timer process for the unresolved queue. */ -static void ipmr_expire_process(unsigned long dummy) +static void ipmr_expire_process(unsigned long arg) { + struct mr_table *mrt = (struct mr_table *)arg; unsigned long now; unsigned long expires; - struct mfc_cache *c, **cp; + struct mfc_cache *c, *next; if (!spin_trylock(&mfc_unres_lock)) { - mod_timer(&ipmr_expire_timer, jiffies+HZ/10); + mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); return; } - if (mfc_unres_queue == NULL) + if (list_empty(&mrt->mfc_unres_queue)) goto out; now = jiffies; expires = 10*HZ; - cp = &mfc_unres_queue; - while ((c=*cp) != NULL) { + list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) expires = interval; - cp = &c->next; continue; } - *cp = c->next; - - ipmr_destroy_unres(c); + list_del(&c->list); + mroute_netlink_event(mrt, c, RTM_DELROUTE); + ipmr_destroy_unres(mrt, c); } - if (mfc_unres_queue != NULL) - mod_timer(&ipmr_expire_timer, jiffies + expires); + if (!list_empty(&mrt->mfc_unres_queue)) + mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); out: spin_unlock(&mfc_unres_lock); @@ -406,17 +689,17 @@ out: /* Fill oifs list. It is called under write locked mrt_lock. 
*/ -static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) +static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, + unsigned char *ttls) { int vifi; - struct net *net = mfc_net(cache); cache->mfc_un.res.minvif = MAXVIFS; cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXVIFS); - for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { - if (VIF_EXISTS(net, vifi) && + for (vifi = 0; vifi < mrt->maxvif; vifi++) { + if (VIF_EXISTS(mrt, vifi) && ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) @@ -427,16 +710,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) } } -static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) +static int vif_add(struct net *net, struct mr_table *mrt, + struct vifctl *vifc, int mrtsock) { int vifi = vifc->vifc_vifi; - struct vif_device *v = &net->ipv4.vif_table[vifi]; + struct vif_device *v = &mrt->vif_table[vifi]; struct net_device *dev; struct in_device *in_dev; int err; /* Is vif busy ? */ - if (VIF_EXISTS(net, vifi)) + if (VIF_EXISTS(mrt, vifi)) return -EADDRINUSE; switch (vifc->vifc_flags) { @@ -446,9 +730,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ - if (net->ipv4.mroute_reg_vif_num >= 0) + if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; - dev = ipmr_reg_vif(net); + dev = ipmr_reg_vif(net, mrt); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); @@ -475,13 +759,13 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) case 0: if (vifc->vifc_flags == VIFF_USE_IFINDEX) { dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); - if (dev && dev->ip_ptr == NULL) { + if (dev && __in_dev_get_rtnl(dev) == NULL) { dev_put(dev); return -EADDRNOTAVAIL; } - } else + } else { dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); - + } if (!dev) return -EADDRNOTAVAIL; err = dev_set_allmulti(dev, 1); @@ -494,16 +778,18 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) return -EINVAL; } - if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) { + in_dev = __in_dev_get_rtnl(dev); + if (!in_dev) { dev_put(dev); return -EADDRNOTAVAIL; } IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; + inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex, + &in_dev->cnf); ip_rt_multicast_event(in_dev); - /* - * Fill in the VIF structures - */ + /* Fill in the VIF structures */ + v->rate_limit = vifc->vifc_rate_limit; v->local = vifc->vifc_lcl_addr.s_addr; v->remote = vifc->vifc_rmt_addr.s_addr; @@ -516,57 +802,100 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) v->pkt_in = 0; v->pkt_out = 0; v->link = dev->ifindex; - if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER)) + if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) v->link = dev->iflink; /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; #ifdef CONFIG_IP_PIMSM - if (v->flags&VIFF_REGISTER) - net->ipv4.mroute_reg_vif_num = vifi; + if (v->flags & VIFF_REGISTER) + mrt->mroute_reg_vif_num = vifi; #endif - if (vifi+1 > net->ipv4.maxvif) - net->ipv4.maxvif = vifi+1; + if (vifi+1 > mrt->maxvif) + mrt->maxvif = vifi+1; write_unlock_bh(&mrt_lock); return 0; } -static struct mfc_cache *ipmr_cache_find(struct net *net, +/* called with rcu_read_lock() */ +static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, __be32 mcastgrp) { int line = 
MFC_HASH(mcastgrp, origin); struct mfc_cache *c; - for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { - if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) - break; + list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { + if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) + return c; } - return c; + return NULL; +} + +/* Look for a (*,*,oif) entry */ +static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt, + int vifi) +{ + int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY)); + struct mfc_cache *c; + + list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) + if (c->mfc_origin == htonl(INADDR_ANY) && + c->mfc_mcastgrp == htonl(INADDR_ANY) && + c->mfc_un.res.ttls[vifi] < 255) + return c; + + return NULL; +} + +/* Look for a (*,G) entry */ +static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt, + __be32 mcastgrp, int vifi) +{ + int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY)); + struct mfc_cache *c, *proxy; + + if (mcastgrp == htonl(INADDR_ANY)) + goto skip; + + list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) + if (c->mfc_origin == htonl(INADDR_ANY) && + c->mfc_mcastgrp == mcastgrp) { + if (c->mfc_un.res.ttls[vifi] < 255) + return c; + + /* It's ok if the vifi is part of the static tree */ + proxy = ipmr_cache_find_any_parent(mrt, + c->mfc_parent); + if (proxy && proxy->mfc_un.res.ttls[vifi] < 255) + return c; + } + +skip: + return ipmr_cache_find_any_parent(mrt, vifi); } /* * Allocate a multicast cache entry */ -static struct mfc_cache *ipmr_cache_alloc(struct net *net) +static struct mfc_cache *ipmr_cache_alloc(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); - if (c == NULL) - return NULL; - c->mfc_un.res.minvif = MAXVIFS; - mfc_net_set(c, net); + + if (c) + c->mfc_un.res.minvif = MAXVIFS; return c; } -static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) +static struct mfc_cache *ipmr_cache_alloc_unres(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); - if (c == NULL) - return NULL; - skb_queue_head_init(&c->mfc_un.unres.unresolved); - c->mfc_un.unres.expires = jiffies + 10*HZ; - mfc_net_set(c, net); + + if (c) { + skb_queue_head_init(&c->mfc_un.unres.unresolved); + c->mfc_un.unres.expires = jiffies + 10*HZ; + } return c; } @@ -574,34 +903,34 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) * A cache entry has gone into a resolved state from queued */ -static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) +static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, + struct mfc_cache *uc, struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; - /* - * Play the pending entries through our router - */ + /* Play the pending entries through our router */ while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); - if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { - nlh->nlmsg_len = (skb_tail_pointer(skb) - - (u8 *)nlh); + if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) { + nlh->nlmsg_len = skb_tail_pointer(skb) - + (u8 *)nlh; } else { nlh->nlmsg_type = NLMSG_ERROR; - nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); + nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); - e = NLMSG_DATA(nlh); + e = nlmsg_data(nlh); e->error = -EMSGSIZE; memset(&e->msg, 0, sizeof(e->msg)); } - rtnl_unicast(skb, mfc_net(c), 
NETLINK_CB(skb).pid); - } else - ip_mr_forward(skb, c, 0); + rtnl_unicast(skb, net, NETLINK_CB(skb).portid); + } else { + ip_mr_forward(net, mrt, skb, c, 0); + } } } @@ -612,13 +941,14 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) * Called under mrt_lock. */ -static int ipmr_cache_report(struct net *net, +static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert) { struct sk_buff *skb; const int ihl = ip_hdrlen(pkt); struct igmphdr *igmp; struct igmpmsg *msg; + struct sock *mroute_sk; int ret; #ifdef CONFIG_IP_PIMSM @@ -634,9 +964,9 @@ static int ipmr_cache_report(struct net *net, #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) { /* Ugly, but we have no choice with this interface. - Duplicate old header, fix ihl, length etc. - And all this only to mangle msg->im_msgtype and - to set msg->im_mbz to "mbz" :-) + * Duplicate old header, fix ihl, length etc. + * And all this only to mangle msg->im_msgtype and + * to set msg->im_mbz to "mbz" :-) */ skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); @@ -645,7 +975,7 @@ static int ipmr_cache_report(struct net *net, memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; - msg->im_vif = net->ipv4.mroute_reg_vif_num; + msg->im_vif = mrt->mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); @@ -653,42 +983,40 @@ static int ipmr_cache_report(struct net *net, #endif { - /* - * Copy the IP header - */ + /* Copy the IP header */ - skb->network_header = skb->tail; + skb_set_network_header(skb, skb->len); skb_put(skb, ihl); skb_copy_to_linear_data(skb, pkt->data, ihl); - ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ + ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ msg = (struct igmpmsg *)skb_network_header(skb); msg->im_vif = vifi; skb_dst_set(skb, dst_clone(skb_dst(pkt))); - /* - * Add our header - */ + /* Add our header */ - igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); + igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); igmp->type = msg->im_msgtype = assert; - igmp->code = 0; - ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ + igmp->code = 0; + ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ skb->transport_header = skb->network_header; } - if (net->ipv4.mroute_sk == NULL) { + rcu_read_lock(); + mroute_sk = rcu_dereference(mrt->mroute_sk); + if (mroute_sk == NULL) { + rcu_read_unlock(); kfree_skb(skb); return -EINVAL; } - /* - * Deliver to mrouted - */ - ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); + /* Deliver to mrouted */ + + ret = sock_queue_rcv_skb(mroute_sk, skb); + rcu_read_unlock(); if (ret < 0) { - if (net_ratelimit()) - printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); + net_warn_ratelimited("mroute: pending queue full, dropping entries\n"); kfree_skb(skb); } @@ -700,44 +1028,42 @@ static int ipmr_cache_report(struct net *net, */ static int -ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) +ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) { + bool found = false; int err; struct mfc_cache *c; const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); - for (c=mfc_unres_queue; c; c=c->next) { - if (net_eq(mfc_net(c), net) && - c->mfc_mcastgrp == iph->daddr && - c->mfc_origin == iph->saddr) + 
list_for_each_entry(c, &mrt->mfc_unres_queue, list) { + if (c->mfc_mcastgrp == iph->daddr && + c->mfc_origin == iph->saddr) { + found = true; break; + } } - if (c == NULL) { - /* - * Create a new entry if allowable - */ + if (!found) { + /* Create a new entry if allowable */ - if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || - (c = ipmr_cache_alloc_unres(net)) == NULL) { + if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || + (c = ipmr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); return -ENOBUFS; } - /* - * Fill in the new cache entry - */ + /* Fill in the new cache entry */ + c->mfc_parent = -1; c->mfc_origin = iph->saddr; c->mfc_mcastgrp = iph->daddr; - /* - * Reflect first query at mrouted. - */ - err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); + /* Reflect first query at mrouted. */ + + err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker @@ -749,17 +1075,17 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) return err; } - atomic_inc(&net->ipv4.cache_resolve_queue_len); - c->next = mfc_unres_queue; - mfc_unres_queue = c; + atomic_inc(&mrt->cache_resolve_queue_len); + list_add(&c->list, &mrt->mfc_unres_queue); + mroute_netlink_event(mrt, c, RTM_NEWROUTE); - mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); + if (atomic_read(&mrt->cache_resolve_queue_len) == 1) + mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); } - /* - * See if we can append the packet - */ - if (c->mfc_un.unres.unresolved.qlen>3) { + /* See if we can append the packet */ + + if (c->mfc_un.unres.unresolved.qlen > 3) { kfree_skb(skb); err = -ENOBUFS; } else { @@ -775,21 +1101,19 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) * MFC cache manipulation by user space mroute daemon */ -static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) +static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent) { int line; - struct mfc_cache *c, **cp; + struct mfc_cache *c, *next; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - for (cp = &net->ipv4.mfc_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && - c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { - write_lock_bh(&mrt_lock); - *cp = c->next; - write_unlock_bh(&mrt_lock); - + c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr && + (parent == -1 || parent == c->mfc_parent)) { + list_del_rcu(&c->list); + mroute_netlink_event(mrt, c, RTM_DELROUTE); ipmr_cache_free(c); return 0; } @@ -797,72 +1121,79 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) return -ENOENT; } -static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) +static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, + struct mfcctl *mfc, int mrtsock, int parent) { + bool found = false; int line; - struct mfc_cache *uc, *c, **cp; + struct mfc_cache *uc, *c; + + if (mfc->mfcc_parent >= MAXVIFS) + return -ENFILE; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - for (cp = &net->ipv4.mfc_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && - c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) + c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr && + (parent == -1 || parent == 
c->mfc_parent)) { + found = true; break; + } } - if (c != NULL) { + if (found) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; - ipmr_update_thresholds(c, mfc->mfcc_ttls); + ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); + mroute_netlink_event(mrt, c, RTM_NEWROUTE); return 0; } - if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) + if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) && + !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) return -EINVAL; - c = ipmr_cache_alloc(net); + c = ipmr_cache_alloc(); if (c == NULL) return -ENOMEM; c->mfc_origin = mfc->mfcc_origin.s_addr; c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; c->mfc_parent = mfc->mfcc_parent; - ipmr_update_thresholds(c, mfc->mfcc_ttls); + ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; - write_lock_bh(&mrt_lock); - c->next = net->ipv4.mfc_cache_array[line]; - net->ipv4.mfc_cache_array[line] = c; - write_unlock_bh(&mrt_lock); + list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); /* * Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. */ + found = false; spin_lock_bh(&mfc_unres_lock); - for (cp = &mfc_unres_queue; (uc=*cp) != NULL; - cp = &uc->next) { - if (net_eq(mfc_net(uc), net) && - uc->mfc_origin == c->mfc_origin && + list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { + if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { - *cp = uc->next; - atomic_dec(&net->ipv4.cache_resolve_queue_len); + list_del(&uc->list); + atomic_dec(&mrt->cache_resolve_queue_len); + found = true; break; } } - if (mfc_unres_queue == NULL) - del_timer(&ipmr_expire_timer); + if (list_empty(&mrt->mfc_unres_queue)) + del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); - if (uc) { - ipmr_cache_resolve(uc, c); + if (found) { + ipmr_cache_resolve(net, mrt, uc, c); ipmr_cache_free(uc); } + mroute_netlink_event(mrt, c, RTM_NEWROUTE); return 0; } @@ -870,71 +1201,61 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct net *net) +static void mroute_clean_tables(struct mr_table *mrt) { int i; LIST_HEAD(list); + struct mfc_cache *c, *next; - /* - * Shut down all active vif entries - */ - for (i = 0; i < net->ipv4.maxvif; i++) { - if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) - vif_delete(net, i, 0, &list); + /* Shut down all active vif entries */ + + for (i = 0; i < mrt->maxvif; i++) { + if (!(mrt->vif_table[i].flags & VIFF_STATIC)) + vif_delete(mrt, i, 0, &list); } unregister_netdevice_many(&list); - /* - * Wipe the cache - */ - for (i=0; i<MFC_LINES; i++) { - struct mfc_cache *c, **cp; + /* Wipe the cache */ - cp = &net->ipv4.mfc_cache_array[i]; - while ((c = *cp) != NULL) { - if (c->mfc_flags&MFC_STATIC) { - cp = &c->next; + for (i = 0; i < MFC_LINES; i++) { + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { + if (c->mfc_flags & MFC_STATIC) continue; - } - write_lock_bh(&mrt_lock); - *cp = c->next; - write_unlock_bh(&mrt_lock); - + list_del_rcu(&c->list); + mroute_netlink_event(mrt, c, RTM_DELROUTE); ipmr_cache_free(c); } } - if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { - struct mfc_cache *c, **cp; - + if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); - cp = &mfc_unres_queue; - while ((c = *cp) != NULL) { - if (!net_eq(mfc_net(c), net)) { - cp = &c->next; - 
continue; - } - *cp = c->next; - - ipmr_destroy_unres(c); + list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { + list_del(&c->list); + mroute_netlink_event(mrt, c, RTM_DELROUTE); + ipmr_destroy_unres(mrt, c); } spin_unlock_bh(&mfc_unres_lock); } } +/* called from ip_ra_control(), before an RCU grace period, + * we dont need to call synchronize_rcu() here + */ static void mrtsock_destruct(struct sock *sk) { struct net *net = sock_net(sk); + struct mr_table *mrt; rtnl_lock(); - if (sk == net->ipv4.mroute_sk) { - IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; - - write_lock_bh(&mrt_lock); - net->ipv4.mroute_sk = NULL; - write_unlock_bh(&mrt_lock); - - mroute_clean_tables(net); + ipmr_for_each_table(mrt, net) { + if (sk == rtnl_dereference(mrt->mroute_sk)) { + IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; + inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, + NETCONFA_IFINDEX_ALL, + net->ipv4.devconf_all); + RCU_INIT_POINTER(mrt->mroute_sk, NULL); + mroute_clean_tables(mrt); + } } rtnl_unlock(); } @@ -948,42 +1269,49 @@ static void mrtsock_destruct(struct sock *sk) int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) { - int ret; + int ret, parent = 0; struct vifctl vif; struct mfcctl mfc; struct net *net = sock_net(sk); + struct mr_table *mrt; + + if (sk->sk_type != SOCK_RAW || + inet_sk(sk)->inet_num != IPPROTO_IGMP) + return -EOPNOTSUPP; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; if (optname != MRT_INIT) { - if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) + if (sk != rcu_access_pointer(mrt->mroute_sk) && + !ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EACCES; } switch (optname) { case MRT_INIT: - if (sk->sk_type != SOCK_RAW || - inet_sk(sk)->inet_num != IPPROTO_IGMP) - return -EOPNOTSUPP; if (optlen != sizeof(int)) - return -ENOPROTOOPT; + return -EINVAL; rtnl_lock(); - if (net->ipv4.mroute_sk) { + if (rtnl_dereference(mrt->mroute_sk)) { rtnl_unlock(); return -EADDRINUSE; } ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { - write_lock_bh(&mrt_lock); - net->ipv4.mroute_sk = sk; - write_unlock_bh(&mrt_lock); - + rcu_assign_pointer(mrt->mroute_sk, sk); IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; + inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, + NETCONFA_IFINDEX_ALL, + net->ipv4.devconf_all); } rtnl_unlock(); return ret; case MRT_DONE: - if (sk != net->ipv4.mroute_sk) + if (sk != rcu_access_pointer(mrt->mroute_sk)) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: @@ -996,9 +1324,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi return -ENFILE; rtnl_lock(); if (optname == MRT_ADD_VIF) { - ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); + ret = vif_add(net, mrt, &vif, + sk == rtnl_dereference(mrt->mroute_sk)); } else { - ret = vif_delete(net, vif.vifc_vifi, 0, NULL); + ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); } rtnl_unlock(); return ret; @@ -1009,15 +1338,22 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi */ case MRT_ADD_MFC: case MRT_DEL_MFC: + parent = -1; + case MRT_ADD_MFC_PROXY: + case MRT_DEL_MFC_PROXY: if (optlen != sizeof(mfc)) return -EINVAL; if (copy_from_user(&mfc, optval, sizeof(mfc))) return -EFAULT; + if (parent == 0) + parent = mfc.mfcc_parent; rtnl_lock(); - if (optname == MRT_DEL_MFC) - ret = ipmr_mfc_delete(net, &mfc); + if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY) + ret = ipmr_mfc_delete(mrt, &mfc, parent); 
else - ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); + ret = ipmr_mfc_add(net, mrt, &mfc, + sk == rtnl_dereference(mrt->mroute_sk), + parent); rtnl_unlock(); return ret; /* @@ -1026,9 +1362,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi case MRT_ASSERT: { int v; - if (get_user(v,(int __user *)optval)) + if (optlen != sizeof(v)) + return -EINVAL; + if (get_user(v, (int __user *)optval)) return -EFAULT; - net->ipv4.mroute_do_assert = (v) ? 1 : 0; + mrt->mroute_do_assert = v; return 0; } #ifdef CONFIG_IP_PIMSM @@ -1036,15 +1374,45 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi { int v; - if (get_user(v,(int __user *)optval)) + if (optlen != sizeof(v)) + return -EINVAL; + if (get_user(v, (int __user *)optval)) + return -EFAULT; + v = !!v; + + rtnl_lock(); + ret = 0; + if (v != mrt->mroute_do_pim) { + mrt->mroute_do_pim = v; + mrt->mroute_do_assert = v; + } + rtnl_unlock(); + return ret; + } +#endif +#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES + case MRT_TABLE: + { + u32 v; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(v, (u32 __user *)optval)) return -EFAULT; - v = (v) ? 1 : 0; + + /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ + if (v != RT_TABLE_DEFAULT && v >= 1000000000) + return -EINVAL; rtnl_lock(); ret = 0; - if (v != net->ipv4.mroute_do_pim) { - net->ipv4.mroute_do_pim = v; - net->ipv4.mroute_do_assert = v; + if (sk == rtnl_dereference(mrt->mroute_sk)) { + ret = -EBUSY; + } else { + if (!ipmr_new_table(net, v)) + ret = -ENOMEM; + else + raw_sk(sk)->ipmr_table = v; } rtnl_unlock(); return ret; @@ -1068,12 +1436,21 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int int olr; int val; struct net *net = sock_net(sk); + struct mr_table *mrt; + + if (sk->sk_type != SOCK_RAW || + inet_sk(sk)->inet_num != IPPROTO_IGMP) + return -EOPNOTSUPP; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM - optname!=MRT_PIM && + optname != MRT_PIM && #endif - optname!=MRT_ASSERT) + optname != MRT_ASSERT) return -ENOPROTOOPT; if (get_user(olr, optlen)) @@ -1089,10 +1466,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int val = 0x0305; #ifdef CONFIG_IP_PIMSM else if (optname == MRT_PIM) - val = net->ipv4.mroute_do_pim; + val = mrt->mroute_do_pim; #endif else - val = net->ipv4.mroute_do_assert; + val = mrt->mroute_do_assert; if (copy_to_user(optval, &val, olr)) return -EFAULT; return 0; @@ -1109,16 +1486,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); + struct mr_table *mrt; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; - if (vr.vifi >= net->ipv4.maxvif) + if (vr.vifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); - vif = &net->ipv4.vif_table[vr.vifi]; - if (VIF_EXISTS(net, vr.vifi)) { + vif = &mrt->vif_table[vr.vifi]; + if (VIF_EXISTS(mrt, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; @@ -1135,45 +1517,119 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) if (copy_from_user(&sr, arg, sizeof(sr))) return -EFAULT; - read_lock(&mrt_lock); - c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); + rcu_read_lock(); + c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; sr.wrong_if = c->mfc_un.res.wrong_if; - read_unlock(&mrt_lock); + rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) return -EFAULT; return 0; } + rcu_read_unlock(); + return -EADDRNOTAVAIL; + default: + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +struct compat_sioc_sg_req { + struct in_addr src; + struct in_addr grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_vif_req { + vifi_t vifi; /* Which iface */ + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + +int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) +{ + struct compat_sioc_sg_req sr; + struct compat_sioc_vif_req vr; + struct vif_device *vif; + struct mfc_cache *c; + struct net *net = sock_net(sk); + struct mr_table *mrt; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; + + switch (cmd) { + case SIOCGETVIFCNT: + if (copy_from_user(&vr, arg, sizeof(vr))) + return -EFAULT; + if (vr.vifi >= mrt->maxvif) + return -EINVAL; + read_lock(&mrt_lock); + vif = &mrt->vif_table[vr.vifi]; + if (VIF_EXISTS(mrt, vr.vifi)) { + vr.icount = vif->pkt_in; + vr.ocount = vif->pkt_out; + vr.ibytes = vif->bytes_in; + vr.obytes = vif->bytes_out; + read_unlock(&mrt_lock); + + if (copy_to_user(arg, &vr, sizeof(vr))) + return -EFAULT; + return 0; + } read_unlock(&mrt_lock); return -EADDRNOTAVAIL; + case SIOCGETSGCNT: + if (copy_from_user(&sr, arg, sizeof(sr))) + return -EFAULT; + + rcu_read_lock(); + c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); + if (c) { + sr.pktcnt = c->mfc_un.res.pkt; + sr.bytecnt = c->mfc_un.res.bytes; + sr.wrong_if = c->mfc_un.res.wrong_if; + rcu_read_unlock(); + + if (copy_to_user(arg, &sr, sizeof(sr))) + return -EFAULT; + return 0; + } + rcu_read_unlock(); + return -EADDRNOTAVAIL; default: return -ENOIOCTLCMD; } } +#endif static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); + struct mr_table *mrt; struct vif_device *v; int ct; - LIST_HEAD(list); - - if (!net_eq(dev_net(dev), net)) - return NOTIFY_DONE; if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; - v = &net->ipv4.vif_table[0]; - for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { - if (v->dev == dev) - vif_delete(net, ct, 1, &list); + + ipmr_for_each_table(mrt, net) { + v = &mrt->vif_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { + if (v->dev == dev) + vif_delete(mrt, ct, 1, NULL); + } } - unregister_netdevice_many(&list); return NOTIFY_DONE; } @@ -1183,7 
+1639,7 @@ static struct notifier_block ip_mr_notifier = { }; /* - * Encapsulate a packet by attaching a valid IPIP header to it. + * Encapsulate a packet by attaching a valid IPIP header to it. * This avoids tunnel drivers and other mess and gives us the speed so * important for multicast video. */ @@ -1191,14 +1647,14 @@ static struct notifier_block ip_mr_notifier = { static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct iphdr *iph; - struct iphdr *old_iph = ip_hdr(skb); + const struct iphdr *old_iph = ip_hdr(skb); skb_push(skb, sizeof(struct iphdr)); skb->transport_header = skb->network_header; skb_reset_network_header(skb); iph = ip_hdr(skb); - iph->version = 4; + iph->version = 4; iph->tos = old_iph->tos; iph->ttl = old_iph->ttl; iph->frag_off = 0; @@ -1207,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) iph->protocol = IPPROTO_IPIP; iph->ihl = 5; iph->tot_len = htons(skb->len); - ip_select_ident(iph, skb_dst(skb), NULL); + ip_select_ident(skb, NULL); ip_send_check(iph); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); @@ -1216,9 +1672,10 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) static inline int ipmr_forward_finish(struct sk_buff *skb) { - struct ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); + IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); if (unlikely(opt->optlen)) ip_forward_options(skb); @@ -1230,13 +1687,14 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) * Processing handlers for ipmr_forward */ -static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) +static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *c, int vifi) { - struct net *net = mfc_net(c); const struct iphdr *iph = ip_hdr(skb); - struct vif_device *vif = &net->ipv4.vif_table[vifi]; + struct vif_device *vif = &mrt->vif_table[vifi]; struct net_device *dev; struct rtable *rt; + struct flowi4 fl4; int encap = 0; if (vif->dev == NULL) @@ -1248,37 +1706,35 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; - ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); + ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); goto out_free; } #endif - if (vif->flags&VIFF_TUNNEL) { - struct flowi fl = { .oif = vif->link, - .nl_u = { .ip4_u = - { .daddr = vif->remote, - .saddr = vif->local, - .tos = RT_TOS(iph->tos) } }, - .proto = IPPROTO_IPIP }; - if (ip_route_output_key(net, &rt, &fl)) + if (vif->flags & VIFF_TUNNEL) { + rt = ip_route_output_ports(net, &fl4, NULL, + vif->remote, vif->local, + 0, 0, + IPPROTO_IPIP, + RT_TOS(iph->tos), vif->link); + if (IS_ERR(rt)) goto out_free; encap = sizeof(struct iphdr); } else { - struct flowi fl = { .oif = vif->link, - .nl_u = { .ip4_u = - { .daddr = iph->daddr, - .tos = RT_TOS(iph->tos) } }, - .proto = IPPROTO_IPIP }; - if (ip_route_output_key(net, &rt, &fl)) + rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, + 0, 0, + IPPROTO_IPIP, + RT_TOS(iph->tos), vif->link); + if (IS_ERR(rt)) goto out_free; } - dev = rt->u.dst.dev; + dev = rt->dst.dev; - if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) { + if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { /* Do not fragment multicasts. 
Alas, IPv4 does not - allow to send ICMP, so that packets will disappear - to blackhole. + * allow to send ICMP, so that packets will disappear + * to blackhole. */ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); @@ -1286,7 +1742,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) goto out_free; } - encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; + encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; if (skb_cow(skb, encap)) { ip_rt_put(rt); @@ -1297,11 +1753,12 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) vif->bytes_out += skb->len; skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); ip_decrease_ttl(ip_hdr(skb)); /* FIXME: forward and output firewalls used to be called here. - * What do we do with netfilter? -- RR */ + * What do we do with netfilter? -- RR + */ if (vif->flags & VIFF_TUNNEL) { ip_encap(skb, vif->local, vif->remote); /* FIXME: extra output firewall step used to be here. --RR */ @@ -1322,21 +1779,20 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) * not mrouter) cannot join to more than one interface - it will * result in receiving multiple packets. */ - NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev, + NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, ipmr_forward_finish); return; out_free: kfree_skb(skb); - return; } -static int ipmr_find_vif(struct net_device *dev) +static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) { - struct net *net = dev_net(dev); int ct; - for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { - if (net->ipv4.vif_table[ct].dev == dev) + + for (ct = mrt->maxvif-1; ct >= 0; ct--) { + if (mrt->vif_table[ct].dev == dev) break; } return ct; @@ -1344,92 +1800,150 @@ static int ipmr_find_vif(struct net_device *dev) /* "local" means that we should preserve one skb (for local delivery) */ -static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local) +static void ip_mr_forward(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *cache, + int local) { int psend = -1; int vif, ct; - struct net *net = mfc_net(cache); + int true_vifi = ipmr_find_vif(mrt, skb->dev); vif = cache->mfc_parent; cache->mfc_un.res.pkt++; cache->mfc_un.res.bytes += skb->len; + if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) { + struct mfc_cache *cache_proxy; + + /* For an (*,G) entry, we only check that the incomming + * interface is part of the static tree. + */ + cache_proxy = ipmr_cache_find_any_parent(mrt, vif); + if (cache_proxy && + cache_proxy->mfc_un.res.ttls[true_vifi] < 255) + goto forward; + } + /* * Wrong interface: drop packet and (maybe) send PIM assert. */ - if (net->ipv4.vif_table[vif].dev != skb->dev) { - int true_vifi; - - if (skb_rtable(skb)->fl.iif == 0) { + if (mrt->vif_table[vif].dev != skb->dev) { + if (rt_is_output_route(skb_rtable(skb))) { /* It is our own packet, looped back. - Very complicated situation... - - The best workaround until routing daemons will be - fixed is not to redistribute packet, if it was - send through wrong interface. It means, that - multicast applications WILL NOT work for - (S,G), which have default multicast route pointing - to wrong oif. In any case, it is not a good - idea to use multicasting applications on router. + * Very complicated situation... + * + * The best workaround until routing daemons will be + * fixed is not to redistribute packet, if it was + * send through wrong interface. 
It means, that + * multicast applications WILL NOT work for + * (S,G), which have default multicast route pointing + * to wrong oif. In any case, it is not a good + * idea to use multicasting applications on router. */ goto dont_forward; } cache->mfc_un.res.wrong_if++; - true_vifi = ipmr_find_vif(skb->dev); - if (true_vifi >= 0 && net->ipv4.mroute_do_assert && + if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, - so that we cannot check that packet arrived on an oif. - It is bad, but otherwise we would need to move pretty - large chunk of pimd to kernel. Ough... --ANK + * so that we cannot check that packet arrived on an oif. + * It is bad, but otherwise we would need to move pretty + * large chunk of pimd to kernel. Ough... --ANK */ - (net->ipv4.mroute_do_pim || + (mrt->mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; - ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); + ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); } goto dont_forward; } - net->ipv4.vif_table[vif].pkt_in++; - net->ipv4.vif_table[vif].bytes_in += skb->len; +forward: + mrt->vif_table[vif].pkt_in++; + mrt->vif_table[vif].bytes_in += skb->len; /* * Forward the frame */ - for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) { - if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { + if (cache->mfc_origin == htonl(INADDR_ANY) && + cache->mfc_mcastgrp == htonl(INADDR_ANY)) { + if (true_vifi >= 0 && + true_vifi != cache->mfc_parent && + ip_hdr(skb)->ttl > + cache->mfc_un.res.ttls[cache->mfc_parent]) { + /* It's an (*,*) entry and the packet is not coming from + * the upstream: forward the packet to the upstream + * only. + */ + psend = cache->mfc_parent; + goto last_forward; + } + goto dont_forward; + } + for (ct = cache->mfc_un.res.maxvif - 1; + ct >= cache->mfc_un.res.minvif; ct--) { + /* For (*,G) entry, don't forward to the incoming interface */ + if ((cache->mfc_origin != htonl(INADDR_ANY) || + ct != true_vifi) && + ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) - ipmr_queue_xmit(skb2, cache, psend); + ipmr_queue_xmit(net, mrt, skb2, cache, + psend); } psend = ct; } } +last_forward: if (psend != -1) { if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) - ipmr_queue_xmit(skb2, cache, psend); + ipmr_queue_xmit(net, mrt, skb2, cache, psend); } else { - ipmr_queue_xmit(skb, cache, psend); - return 0; + ipmr_queue_xmit(net, mrt, skb, cache, psend); + return; } } dont_forward: if (!local) kfree_skb(skb); - return 0; } +static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb) +{ + struct rtable *rt = skb_rtable(skb); + struct iphdr *iph = ip_hdr(skb); + struct flowi4 fl4 = { + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowi4_tos = RT_TOS(iph->tos), + .flowi4_oif = (rt_is_output_route(rt) ? + skb->dev->ifindex : 0), + .flowi4_iif = (rt_is_output_route(rt) ? 
+ LOOPBACK_IFINDEX : + skb->dev->ifindex), + .flowi4_mark = skb->mark, + }; + struct mr_table *mrt; + int err; + + err = ipmr_fib_lookup(net, &fl4, &mrt); + if (err) + return ERR_PTR(err); + return mrt; +} /* * Multicast packets for forwarding arrive here + * Called with rcu_read_lock(); */ int ip_mr_input(struct sk_buff *skb) @@ -1437,37 +1951,50 @@ int ip_mr_input(struct sk_buff *skb) struct mfc_cache *cache; struct net *net = dev_net(skb->dev); int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; + struct mr_table *mrt; /* Packet is looped back after forward, it should not be - forwarded second time, but still can be delivered locally. + * forwarded second time, but still can be delivered locally. */ - if (IPCB(skb)->flags&IPSKB_FORWARDED) + if (IPCB(skb)->flags & IPSKB_FORWARDED) goto dont_forward; + mrt = ipmr_rt_fib_lookup(net, skb); + if (IS_ERR(mrt)) { + kfree_skb(skb); + return PTR_ERR(mrt); + } if (!local) { - if (IPCB(skb)->opt.router_alert) { - if (ip_call_ra_chain(skb)) - return 0; - } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){ - /* IGMPv1 (and broken IGMPv2 implementations sort of - Cisco IOS <= 11.2(8)) do not put router alert - option to IGMP packets destined to routable - groups. It is very bad, because it means - that we can forward NO IGMP messages. - */ - read_lock(&mrt_lock); - if (net->ipv4.mroute_sk) { - nf_reset(skb); - raw_rcv(net->ipv4.mroute_sk, skb); - read_unlock(&mrt_lock); - return 0; - } - read_unlock(&mrt_lock); + if (IPCB(skb)->opt.router_alert) { + if (ip_call_ra_chain(skb)) + return 0; + } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { + /* IGMPv1 (and broken IGMPv2 implementations sort of + * Cisco IOS <= 11.2(8)) do not put router alert + * option to IGMP packets destined to routable + * groups. It is very bad, because it means + * that we can forward NO IGMP messages. + */ + struct sock *mroute_sk; + + mroute_sk = rcu_dereference(mrt->mroute_sk); + if (mroute_sk) { + nf_reset(skb); + raw_rcv(mroute_sk, skb); + return 0; + } } } - read_lock(&mrt_lock); - cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); + /* already under rcu_read_lock() */ + cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); + if (cache == NULL) { + int vif = ipmr_find_vif(mrt, skb->dev); + + if (vif >= 0) + cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, + vif); + } /* * No usable cache entry @@ -1478,27 +2005,26 @@ int ip_mr_input(struct sk_buff *skb) if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); ip_local_deliver(skb); - if (skb2 == NULL) { - read_unlock(&mrt_lock); + if (skb2 == NULL) return -ENOBUFS; - } skb = skb2; } - vif = ipmr_find_vif(skb->dev); + read_lock(&mrt_lock); + vif = ipmr_find_vif(mrt, skb->dev); if (vif >= 0) { - int err = ipmr_cache_unresolved(net, vif, skb); + int err2 = ipmr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); - return err; + return err2; } read_unlock(&mrt_lock); kfree_skb(skb); return -ENODEV; } - ip_mr_forward(skb, cache, local); - + read_lock(&mrt_lock); + ip_mr_forward(net, mrt, skb, cache, local); read_unlock(&mrt_lock); if (local) @@ -1514,18 +2040,19 @@ dont_forward: } #ifdef CONFIG_IP_PIMSM -static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) +/* called with rcu_read_lock() */ +static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, + unsigned int pimlen) { struct net_device *reg_dev = NULL; struct iphdr *encap; - struct net *net = dev_net(skb->dev); encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); /* - Check that: - a. 
@@ -1514,18 +2040,19 @@ dont_forward:
 }
 
 #ifdef CONFIG_IP_PIMSM
-static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
+/* called with rcu_read_lock() */
+static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
+		     unsigned int pimlen)
 {
 	struct net_device *reg_dev = NULL;
 	struct iphdr *encap;
-	struct net *net = dev_net(skb->dev);
 
 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
 	/*
-	   Check that:
-	   a. packet is really destinted to a multicast group
-	   b. packet is not a NULL-REGISTER
-	   c. packet is not truncated
+	 * Check that:
+	 * a. packet is really sent to a multicast group
+	 * b. packet is not a NULL-REGISTER
+	 * c. packet is not truncated
 	 */
 	if (!ipv4_is_multicast(encap->daddr) ||
 	    encap->tot_len == 0 ||
@@ -1533,30 +2060,24 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 		return 1;
 
 	read_lock(&mrt_lock);
-	if (net->ipv4.mroute_reg_vif_num >= 0)
-		reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
-	if (reg_dev)
-		dev_hold(reg_dev);
+	if (mrt->mroute_reg_vif_num >= 0)
+		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
 	read_unlock(&mrt_lock);
 
 	if (reg_dev == NULL)
 		return 1;
 
 	skb->mac_header = skb->network_header;
-	skb_pull(skb, (u8*)encap - skb->data);
+	skb_pull(skb, (u8 *)encap - skb->data);
 	skb_reset_network_header(skb);
-	skb->dev = reg_dev;
 	skb->protocol = htons(ETH_P_IP);
-	skb->ip_summed = 0;
-	skb->pkt_type = PACKET_HOST;
-	skb_dst_drop(skb);
-	reg_dev->stats.rx_bytes += skb->len;
-	reg_dev->stats.rx_packets++;
-	nf_reset(skb);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
+
 	netif_rx(skb);
-	dev_put(reg_dev);
 
-	return 0;
+	return NET_RX_SUCCESS;
 }
 #endif
 
@@ -1565,21 +2086,25 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
  * Handle IGMP messages of PIMv1
  */
 
-int pim_rcv_v1(struct sk_buff * skb)
+int pim_rcv_v1(struct sk_buff *skb)
 {
 	struct igmphdr *pim;
 	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt;
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
 		goto drop;
 
 	pim = igmp_hdr(skb);
 
-	if (!net->ipv4.mroute_do_pim ||
+	mrt = ipmr_rt_fib_lookup(net, skb);
+	if (IS_ERR(mrt))
+		goto drop;
+	if (!mrt->mroute_do_pim ||
 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
 		goto drop;
 
-	if (__pim_rcv(skb, sizeof(*pim))) {
+	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
 drop:
 		kfree_skb(skb);
 	}
@@ -1588,21 +2113,26 @@ drop:
 }
 #endif
 
 #ifdef CONFIG_IP_PIMSM_V2
-static int pim_rcv(struct sk_buff * skb)
+static int pim_rcv(struct sk_buff *skb)
 {
 	struct pimreghdr *pim;
+	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt;
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
 		goto drop;
 
 	pim = (struct pimreghdr *)skb_transport_header(skb);
-	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
-	    (pim->flags&PIM_NULL_REGISTER) ||
+	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
+	    (pim->flags & PIM_NULL_REGISTER) ||
 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
 
-	if (__pim_rcv(skb, sizeof(*pim))) {
+	mrt = ipmr_rt_fib_lookup(net, skb);
+	if (IS_ERR(mrt))
+		goto drop;
+	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
 drop:
 		kfree_skb(skb);
 	}
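
For reference, this is roughly the wire-level test pim_rcv applies before handing a register to __pim_rcv for decapsulation: version 2, type REGISTER, and not a NULL-Register (those carry no inner packet). The toy below collapses the 32-bit flag word into one byte and omits the checksum fallback visible above (header-only sum accepted, else the whole-packet sum); struct layout and names are illustrative, not the kernel's struct pimreghdr:

#include <stdio.h>
#include <stdint.h>

#define PIM_VERSION		2
#define PIM_REGISTER		1
#define PIM_NULL_REGISTER	0x40	/* the N bit, folded into one byte */

/* First bytes of a PIM register message, minus checksum handling. */
struct pim_model {
	uint8_t type;		/* version << 4 | message type */
	uint8_t flags;
};

/* Mirrors the accept test in pim_rcv above. */
static int pim_register_ok(const struct pim_model *pim)
{
	return pim->type == ((PIM_VERSION << 4) | PIM_REGISTER) &&
	       !(pim->flags & PIM_NULL_REGISTER);
}

int main(void)
{
	struct pim_model reg  = { (PIM_VERSION << 4) | PIM_REGISTER, 0 };
	struct pim_model null = { (PIM_VERSION << 4) | PIM_REGISTER, 0x40 };

	printf("register: %s\n", pim_register_ok(&reg)  ? "decapsulate" : "drop");
	printf("null-reg: %s\n", pim_register_ok(&null) ? "decapsulate" : "drop");
	return 0;
}
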
@@ -1610,71 +2140,95 @@ drop:
 }
 #endif
 
-static int
-ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			      struct mfc_cache *c, struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
-	struct net *net = mfc_net(c);
-	struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
-	u8 *b = skb_tail_pointer(skb);
-	struct rtattr *mp_head;
+	struct nlattr *mp_attr;
+	struct rta_mfc_stats mfcs;
+
+	/* If cache is unresolved, don't try to parse IIF and OIF */
+	if (c->mfc_parent >= MAXVIFS)
+		return -ENOENT;
 
-	if (dev)
-		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+	if (VIF_EXISTS(mrt, c->mfc_parent) &&
+	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
+		return -EMSGSIZE;
 
-	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
+	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
+		return -EMSGSIZE;
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (c->mfc_un.res.ttls[ct] < 255) {
-			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
-				goto rtattr_failure;
-			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
+			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
+				nla_nest_cancel(skb, mp_attr);
+				return -EMSGSIZE;
+			}
+
 			nhp->rtnh_flags = 0;
 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-			nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
+			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
 			nhp->rtnh_len = sizeof(*nhp);
 		}
 	}
-	mp_head->rta_type = RTA_MULTIPATH;
-	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
+
+	nla_nest_end(skb, mp_attr);
+
+	mfcs.mfcs_packets = c->mfc_un.res.pkt;
+	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
+	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
+	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
+		return -EMSGSIZE;
+
 	rtm->rtm_type = RTN_MULTICAST;
 	return 1;
-
-rtattr_failure:
-	nlmsg_trim(skb, b);
-	return -EMSGSIZE;
 }
 
-int ipmr_get_route(struct net *net,
-		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ipmr_get_route(struct net *net, struct sk_buff *skb,
+		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
 {
-	int err;
 	struct mfc_cache *cache;
-	struct rtable *rt = skb_rtable(skb);
+	struct mr_table *mrt;
+	int err;
 
-	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
+	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
+
+	rcu_read_lock();
+	cache = ipmr_cache_find(mrt, saddr, daddr);
+	if (cache == NULL && skb->dev) {
+		int vif = ipmr_find_vif(mrt, skb->dev);
+
+		if (vif >= 0)
+			cache = ipmr_cache_find_any(mrt, daddr, vif);
+	}
 	if (cache == NULL) {
 		struct sk_buff *skb2;
 		struct iphdr *iph;
 		struct net_device *dev;
-		int vif;
+		int vif = -1;
 
 		if (nowait) {
-			read_unlock(&mrt_lock);
+			rcu_read_unlock();
 			return -EAGAIN;
 		}
 
 		dev = skb->dev;
-		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
+		read_lock(&mrt_lock);
+		if (dev)
+			vif = ipmr_find_vif(mrt, dev);
+		if (vif < 0) {
 			read_unlock(&mrt_lock);
+			rcu_read_unlock();
 			return -ENODEV;
 		}
 		skb2 = skb_clone(skb, GFP_ATOMIC);
 		if (!skb2) {
 			read_unlock(&mrt_lock);
+			rcu_read_unlock();
 			return -ENOMEM;
 		}
 
@@ -1682,27 +2236,186 @@ int ipmr_get_route(struct net *net,
 		skb_reset_network_header(skb2);
 		iph = ip_hdr(skb2);
 		iph->ihl = sizeof(struct iphdr) >> 2;
-		iph->saddr = rt->rt_src;
-		iph->daddr = rt->rt_dst;
+		iph->saddr = saddr;
+		iph->daddr = daddr;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(net, vif, skb2);
+		err = ipmr_cache_unresolved(mrt, vif, skb2);
 		read_unlock(&mrt_lock);
+		rcu_read_unlock();
 		return err;
 	}
 
-	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
+	read_lock(&mrt_lock);
+	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
-	err = ipmr_fill_mroute(skb, cache, rtm);
+	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
 	read_unlock(&mrt_lock);
+	rcu_read_unlock();
 	return err;
 }
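
__ipmr_fill_mroute now builds the RTA_MULTIPATH attribute with nla_nest_start()/nla_nest_end() instead of hand-rolled rtattr arithmetic. The idea: emit the attribute header with a zero length, append the inner rtnexthop records, then patch the header once the final size is known. A stand-alone byte-level model of that nesting (toy TLV type and buffer, not the kernel's struct nlattr handling):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ALIGN4(n) (((n) + 3u) & ~3u)	/* models NLA_ALIGN() */

struct tlv { uint16_t len, type; };	/* models struct nlattr */

static unsigned char buf[256];
static size_t used;

/* models nla_nest_start(): emit a header now, fix its length later */
static size_t nest_start(uint16_t type)
{
	size_t off = used;
	struct tlv t = { 0, type };

	memcpy(buf + used, &t, sizeof(t));
	used += ALIGN4(sizeof(t));
	return off;
}

/* models nla_nest_end(): patch the recorded header's length field */
static void nest_end(size_t off)
{
	uint16_t len = (uint16_t)(used - off);

	memcpy(buf + off, &len, sizeof(len));
}

int main(void)
{
	size_t mp = nest_start(9 /* RTA_MULTIPATH */);
	unsigned char nexthop[8] = { 0 };	/* stand-in rtnexthop */

	memcpy(buf + used, nexthop, sizeof(nexthop));
	used += ALIGN4(sizeof(nexthop));
	nest_end(mp);

	printf("nest at %zu, total length %u\n",
	       mp, (unsigned)*(uint16_t *)(buf + mp));
	return 0;
}

nla_nest_cancel(), used above on the error path, is the inverse: it trims everything back to the recorded offset so a half-built nest never reaches the reader.
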
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
+			    int flags)
+{
+	struct nlmsghdr *nlh;
+	struct rtmsg *rtm;
+	int err;
+
+	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	rtm = nlmsg_data(nlh);
+	rtm->rtm_family = RTNL_FAMILY_IPMR;
+	rtm->rtm_dst_len = 32;
+	rtm->rtm_src_len = 32;
+	rtm->rtm_tos = 0;
+	rtm->rtm_table = mrt->id;
+	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+		goto nla_put_failure;
+	rtm->rtm_type = RTN_MULTICAST;
+	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+	if (c->mfc_flags & MFC_STATIC)
+		rtm->rtm_protocol = RTPROT_STATIC;
+	else
+		rtm->rtm_protocol = RTPROT_MROUTED;
+	rtm->rtm_flags = 0;
+
+	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
+	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+		goto nla_put_failure;
+	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
+	/* do not break the dump if cache is unresolved */
+	if (err < 0 && err != -ENOENT)
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static size_t mroute_msgsize(bool unresolved, int maxvif)
+{
+	size_t len =
+		NLMSG_ALIGN(sizeof(struct rtmsg))
+		+ nla_total_size(4)	/* RTA_TABLE */
+		+ nla_total_size(4)	/* RTA_SRC */
+		+ nla_total_size(4)	/* RTA_DST */
+		;
+
+	if (!unresolved)
+		len = len
+		      + nla_total_size(4)	/* RTA_IIF */
+		      + nla_total_size(0)	/* RTA_MULTIPATH */
+		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
+						/* RTA_MFC_STATS */
+		      + nla_total_size(sizeof(struct rta_mfc_stats))
+		;
+
+	return len;
+}
+
+static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
+				 int cmd)
+{
+	struct net *net = read_pnet(&mrt->net);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
+			GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+	if (err < 0)
+		goto errout;
+
+	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
+	return;
+
+errout:
+	kfree_skb(skb);
+	if (err < 0)
+		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
+}
+
+static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct mr_table *mrt;
+	struct mfc_cache *mfc;
+	unsigned int t = 0, s_t;
+	unsigned int h = 0, s_h;
+	unsigned int e = 0, s_e;
+
+	s_t = cb->args[0];
+	s_h = cb->args[1];
+	s_e = cb->args[2];
+
+	rcu_read_lock();
+	ipmr_for_each_table(mrt, net) {
+		if (t < s_t)
+			goto next_table;
+		if (t > s_t)
+			s_h = 0;
+		for (h = s_h; h < MFC_LINES; h++) {
+			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
+				if (e < s_e)
+					goto next_entry;
+				if (ipmr_fill_mroute(mrt, skb,
+						     NETLINK_CB(cb->skb).portid,
+						     cb->nlh->nlmsg_seq,
+						     mfc, RTM_NEWROUTE,
+						     NLM_F_MULTI) < 0)
+					goto done;
+next_entry:
+				e++;
+			}
+			e = s_e = 0;
+		}
+		spin_lock_bh(&mfc_unres_lock);
+		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+			if (e < s_e)
+				goto next_entry2;
+			if (ipmr_fill_mroute(mrt, skb,
+					     NETLINK_CB(cb->skb).portid,
+					     cb->nlh->nlmsg_seq,
+					     mfc, RTM_NEWROUTE,
+					     NLM_F_MULTI) < 0) {
+				spin_unlock_bh(&mfc_unres_lock);
+				goto done;
+			}
next_entry2:
+			e++;
+		}
+		spin_unlock_bh(&mfc_unres_lock);
+		e = s_e = 0;
+		s_h = 0;
+next_table:
+		t++;
+	}
done:
+	rcu_read_unlock();
+
+	cb->args[2] = e;
+	cb->args[1] = h;
+	cb->args[0] = t;
+
+	return skb->len;
+}
+
 #ifdef CONFIG_PROC_FS
 /*
- *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
+ *	The /proc interfaces to multicast routing :
+ *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
 struct ipmr_vif_iter {
 	struct seq_net_private p;
+	struct mr_table *mrt;
 	int ct;
 };
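
ipmr_rtm_dumproute has to survive filling the skb mid-walk: it saves its position as (table, bucket, entry) in cb->args[0..2] and, on the next callback invocation, skips everything before that cursor. A minimal user-space model of that resume protocol, with made-up table sizes:

#include <stdio.h>

#define TABLES	2
#define LINES	4	/* hash buckets per table */
#define PER_MSG	3	/* entries that fit in one "skb" */

/* Dump at most PER_MSG entries, resuming from the cursor (*t, *h, *e),
 * like cb->args[0..2] in ipmr_rtm_dumproute. Returns nonzero while
 * there is more to dump. */
static int dump(int entries[TABLES][LINES], int *t, int *h, int *e)
{
	int emitted = 0;

	for (; *t < TABLES; (*t)++, *h = 0) {
		for (; *h < LINES; (*h)++, *e = 0) {
			for (; *e < entries[*t][*h]; (*e)++) {
				if (emitted == PER_MSG)
					return 1;	/* come back later */
				printf("table %d bucket %d entry %d\n",
				       *t, *h, *e);
				emitted++;
			}
		}
	}
	return 0;
}

int main(void)
{
	int entries[TABLES][LINES] = { { 2, 0, 1, 0 }, { 0, 3, 0, 0 } };
	int t = 0, h = 0, e = 0;

	while (dump(entries, &t, &h, &e))
		printf("-- message full, resuming --\n");
	return 0;
}

The reset rules are the easy part to get wrong: the entry index is preserved only for the bucket the dump stopped in, and the bucket index only for the table it stopped in, exactly as the s_h/s_e zeroing does above.
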
@@ -1710,11 +2423,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
 					   struct ipmr_vif_iter *iter,
 					   loff_t pos)
 {
-	for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
-		if (!VIF_EXISTS(net, iter->ct))
+	struct mr_table *mrt = iter->mrt;
+
+	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+		if (!VIF_EXISTS(mrt, iter->ct))
 			continue;
 		if (pos-- == 0)
-			return &net->ipv4.vif_table[iter->ct];
+			return &mrt->vif_table[iter->ct];
 	}
 	return NULL;
 }
@@ -1722,7 +2437,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(mrt_lock)
 {
+	struct ipmr_vif_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return ERR_PTR(-ENOENT);
+
+	iter->mrt = mrt;
 
 	read_lock(&mrt_lock);
 	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -1733,15 +2456,16 @@
 {
 	struct ipmr_vif_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = iter->mrt;
 
 	++*pos;
 	if (v == SEQ_START_TOKEN)
 		return ipmr_vif_seq_idx(net, iter, 0);
 
-	while (++iter->ct < net->ipv4.maxvif) {
-		if (!VIF_EXISTS(net, iter->ct))
+	while (++iter->ct < mrt->maxvif) {
+		if (!VIF_EXISTS(mrt, iter->ct))
 			continue;
-		return &net->ipv4.vif_table[iter->ct];
+		return &mrt->vif_table[iter->ct];
 	}
 	return NULL;
 }
@@ -1754,7 +2478,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
 
 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 {
-	struct net *net = seq_file_net(seq);
+	struct ipmr_vif_iter *iter = seq->private;
+	struct mr_table *mrt = iter->mrt;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1765,7 +2490,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 
 		seq_printf(seq,
 			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
-			   vif - net->ipv4.vif_table,
+			   vif - mrt->vif_table,
 			   name, vif->bytes_in, vif->pkt_in,
 			   vif->bytes_out, vif->pkt_out,
 			   vif->flags, vif->local, vif->remote);
@@ -1796,7 +2521,8 @@ static const struct file_operations ipmr_vif_fops = {
 
 struct ipmr_mfc_iter {
 	struct seq_net_private p;
-	struct mfc_cache **cache;
+	struct mr_table *mrt;
+	struct list_head *cache;
 	int ct;
 };
 
@@ -1804,22 +2530,22 @@ struct ipmr_mfc_iter {
 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
 					  struct ipmr_mfc_iter *it, loff_t pos)
 {
+	struct mr_table *mrt = it->mrt;
 	struct mfc_cache *mfc;
 
-	it->cache = net->ipv4.mfc_cache_array;
-	read_lock(&mrt_lock);
-	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
-		for (mfc = net->ipv4.mfc_cache_array[it->ct];
-		     mfc; mfc = mfc->next)
+	rcu_read_lock();
+	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
+		it->cache = &mrt->mfc_cache_array[it->ct];
+		list_for_each_entry_rcu(mfc, it->cache, list)
 			if (pos-- == 0)
 				return mfc;
-	read_unlock(&mrt_lock);
+	}
+	rcu_read_unlock();
 
-	it->cache = &mfc_unres_queue;
 	spin_lock_bh(&mfc_unres_lock);
-	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
-		if (net_eq(mfc_net(mfc), net) &&
-		    pos-- == 0)
+	it->cache = &mrt->mfc_unres_queue;
+	list_for_each_entry(mfc, it->cache, list)
+		if (pos-- == 0)
 			return mfc;
 	spin_unlock_bh(&mfc_unres_lock);
 
@@ -1832,7 +2558,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt;
 
+	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return ERR_PTR(-ENOENT);
+
+	it->mrt = mrt;
 	it->cache = NULL;
 	it->ct = 0;
 	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
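
The mfc seq_file iterator is the subtle part of this section: it crosses two regions with different locking, the resolved buckets under rcu_read_lock() and the unresolved queue under spin_lock_bh(), and ipmr_mfc_seq_stop() must release whichever one the cursor stopped in. A stand-alone model of that hand-off, with prints standing in for the real locks and toy entry names:

#include <stdio.h>

enum region { NONE, RESOLVED, UNRESOLVED };

static const char *resolved[]   = { "(S1,G1)", "(S2,G1)" };
static const char *unresolved[] = { "(S3,G2)" };
static const int n_res = 2, n_unres = 1;

/* Models the ipmr_mfc_seq_next hand-off: when the resolved region is
 * exhausted, "drop rcu_read_lock" and "take mfc_unres_lock" before
 * continuing in the unresolved queue. */
static const char *iter_next(enum region *reg, int *idx)
{
	if (*reg == RESOLVED) {
		if (*idx < n_res)
			return resolved[(*idx)++];
		printf("  [rcu_read_unlock -> spin_lock_bh]\n");
		*reg = UNRESOLVED;
		*idx = 0;
	}
	if (*reg == UNRESOLVED) {
		if (*idx < n_unres)
			return unresolved[(*idx)++];
		printf("  [spin_unlock_bh]\n");
		*reg = NONE;	/* stop() has nothing left to release */
	}
	return NULL;
}

int main(void)
{
	enum region reg = RESOLVED;	/* start() took rcu_read_lock */
	int idx = 0;
	const char *e;

	printf("  [rcu_read_lock]\n");
	while ((e = iter_next(&reg, &idx)))
		printf("%s\n", e);
	return 0;
}
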
@@ -1844,39 +2576,38 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct mfc_cache *mfc = v;
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = it->mrt;
 
 	++*pos;
 
 	if (v == SEQ_START_TOKEN)
 		return ipmr_mfc_seq_idx(net, seq->private, 0);
 
-	if (mfc->next)
-		return mfc->next;
+	if (mfc->list.next != it->cache)
+		return list_entry(mfc->list.next, struct mfc_cache, list);
 
-	if (it->cache == &mfc_unres_queue)
+	if (it->cache == &mrt->mfc_unres_queue)
 		goto end_of_list;
 
-	BUG_ON(it->cache != net->ipv4.mfc_cache_array);
+	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
 
 	while (++it->ct < MFC_LINES) {
-		mfc = net->ipv4.mfc_cache_array[it->ct];
-		if (mfc)
-			return mfc;
+		it->cache = &mrt->mfc_cache_array[it->ct];
+		if (list_empty(it->cache))
+			continue;
+		return list_first_entry(it->cache, struct mfc_cache, list);
 	}
 
 	/* exhausted cache_array, show unresolved */
-	read_unlock(&mrt_lock);
-	it->cache = &mfc_unres_queue;
+	rcu_read_unlock();
+	it->cache = &mrt->mfc_unres_queue;
 	it->ct = 0;
 
 	spin_lock_bh(&mfc_unres_lock);
-	mfc = mfc_unres_queue;
-	while (mfc && !net_eq(mfc_net(mfc), net))
-		mfc = mfc->next;
-	if (mfc)
-		return mfc;
+	if (!list_empty(it->cache))
+		return list_first_entry(it->cache, struct mfc_cache, list);
 
- end_of_list:
+end_of_list:
 	spin_unlock_bh(&mfc_unres_lock);
 	it->cache = NULL;
 
@@ -1886,18 +2617,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 {
 	struct ipmr_mfc_iter *it = seq->private;
-	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = it->mrt;
 
-	if (it->cache == &mfc_unres_queue)
+	if (it->cache == &mrt->mfc_unres_queue)
 		spin_unlock_bh(&mfc_unres_lock);
-	else if (it->cache == net->ipv4.mfc_cache_array)
-		read_unlock(&mrt_lock);
+	else if (it->cache == &mrt->mfc_cache_array[it->ct])
+		rcu_read_unlock();
 }
 
 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 {
 	int n;
-	struct net *net = seq_file_net(seq);
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1905,20 +2635,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 	} else {
 		const struct mfc_cache *mfc = v;
 		const struct ipmr_mfc_iter *it = seq->private;
+		const struct mr_table *mrt = it->mrt;
 
-		seq_printf(seq, "%08lX %08lX %-3hd",
-			   (unsigned long) mfc->mfc_mcastgrp,
-			   (unsigned long) mfc->mfc_origin,
+		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
+			   (__force u32) mfc->mfc_origin,
 			   mfc->mfc_parent);
 
-		if (it->cache != &mfc_unres_queue) {
+		if (it->cache != &mrt->mfc_unres_queue) {
 			seq_printf(seq, " %8lu %8lu %8lu",
 				   mfc->mfc_un.res.pkt,
 				   mfc->mfc_un.res.bytes,
 				   mfc->mfc_un.res.wrong_if);
 			for (n = mfc->mfc_un.res.minvif;
-			     n < mfc->mfc_un.res.maxvif; n++ ) {
-				if (VIF_EXISTS(net, n) &&
+			     n < mfc->mfc_un.res.maxvif; n++) {
+				if (VIF_EXISTS(mrt, n) &&
 				    mfc->mfc_un.res.ttls[n] < 255)
 					seq_printf(seq,
 					   " %2d:%-3d",
@@ -1970,45 +2701,27 @@ static const struct net_protocol pim_protocol = {
 */
 static int __net_init ipmr_net_init(struct net *net)
 {
-	int err = 0;
+	int err;
 
-	net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
-				      GFP_KERNEL);
-	if (!net->ipv4.vif_table) {
-		err = -ENOMEM;
+	err = ipmr_rules_init(net);
+	if (err < 0)
 		goto fail;
-	}
-
-	/* Forwarding cache */
-	net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
-					    sizeof(struct mfc_cache *),
-					    GFP_KERNEL);
-	if (!net->ipv4.mfc_cache_array) {
-		err = -ENOMEM;
-		goto fail_mfc_cache;
-	}
-
-#ifdef CONFIG_IP_PIMSM
-	net->ipv4.mroute_reg_vif_num = -1;
-#endif
 
 #ifdef CONFIG_PROC_FS
 	err = -ENOMEM;
-	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
+	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
 		goto proc_vif_fail;
-	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
+	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
 		goto proc_cache_fail;
 #endif
 	return 0;
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
-	proc_net_remove(net, "ip_mr_vif");
+	remove_proc_entry("ip_mr_vif", net->proc_net);
 proc_vif_fail:
-	kfree(net->ipv4.mfc_cache_array);
+	ipmr_rules_exit(net);
 #endif
-fail_mfc_cache:
-	kfree(net->ipv4.vif_table);
 fail:
 	return err;
 }
 
@@ -2016,11 +2729,10 @@ fail:
 static void __net_exit ipmr_net_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-	proc_net_remove(net, "ip_mr_cache");
-	proc_net_remove(net, "ip_mr_vif");
+	remove_proc_entry("ip_mr_cache", net->proc_net);
+	remove_proc_entry("ip_mr_vif", net->proc_net);
 #endif
-	kfree(net->ipv4.mfc_cache_array);
-	kfree(net->ipv4.vif_table);
+	ipmr_rules_exit(net);
 }
 
 static struct pernet_operations ipmr_net_ops = {
@@ -2034,7 +2746,7 @@ int __init ip_mr_init(void)
 
 	mrt_cachep = kmem_cache_create("ip_mrt_cache",
 				       sizeof(struct mfc_cache),
-				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 				       NULL);
 	if (!mrt_cachep)
 		return -ENOMEM;
 
@@ -2043,17 +2755,18 @@
 	if (err)
 		goto reg_pernet_fail;
 
-	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
 	err = register_netdevice_notifier(&ip_mr_notifier);
 	if (err)
 		goto reg_notif_fail;
 #ifdef CONFIG_IP_PIMSM_V2
 	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
-		printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
+		pr_err("%s: can't add PIM protocol\n", __func__);
 		err = -EAGAIN;
 		goto add_proto_fail;
 	}
 #endif
+	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
+		      NULL, ipmr_rtm_dumproute, NULL);
 	return 0;
 
 #ifdef CONFIG_IP_PIMSM_V2
@@ -2061,7 +2774,6 @@ add_proto_fail:
 	unregister_netdevice_notifier(&ip_mr_notifier);
 #endif
reg_notif_fail:
-	del_timer(&ipmr_expire_timer);
 	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
 	kmem_cache_destroy(mrt_cachep);
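
ip_mr_init keeps the usual goto-based unwind: each failing setup step jumps to a label that tears down only the steps already completed, in reverse order (the global expire-timer setup disappears from this path in the hunk above). A minimal stand-alone model of the pattern, with made-up step names:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s%s\n", name, fail ? " -> FAILED" : "");
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

/* Models the unwind in ip_mr_init: on failure, fall through the labels
 * so only the already-initialized steps are torn down. */
static int init_all(int fail_at)
{
	int err;

	err = step("cache", fail_at == 1);
	if (err)
		goto out;
	err = step("pernet", fail_at == 2);
	if (err)
		goto undo_cache;
	err = step("notifier", fail_at == 3);
	if (err)
		goto undo_pernet;
	return 0;

undo_pernet:
	undo("pernet");
undo_cache:
	undo("cache");
out:
	return err;
}

int main(void)
{
	return init_all(3) ? 1 : 0;	/* fails at step 3, unwinds 2 then 1 */
}
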
