Diffstat (limited to 'net/bridge')
33 files changed, 3163 insertions, 220 deletions
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index e143ca67888..19a6b9629c5 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -31,3 +31,16 @@ config BRIDGE
 	  will be called bridge.
 
 	  If unsure, say N.
+
+config BRIDGE_IGMP_SNOOPING
+	bool "IGMP snooping"
+	depends on BRIDGE
+	default y
+	---help---
+	  If you say Y here, then the Ethernet bridge will be able selectively
+	  forward multicast traffic based on IGMP traffic received from each
+	  port.
+
+	  Say N to exclude this support and reduce the binary size.
+
+	  If unsure, say Y.
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index f444c12cde5..d0359ea8ee7 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,4 +12,6 @@ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
 
 bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
 
+bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o
+
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 1a99c4e04e8..eb7062d2e9e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -25,6 +25,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_bridge *br = netdev_priv(dev);
 	const unsigned char *dest = skb->data;
 	struct net_bridge_fdb_entry *dst;
+	struct net_bridge_mdb_entry *mdst;
+
+	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
@@ -32,13 +35,21 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	if (dest[0] & 1)
-		br_flood_deliver(br, skb);
-	else if ((dst = __br_fdb_get(br, dest)) != NULL)
+	if (dest[0] & 1) {
+		if (br_multicast_rcv(br, NULL, skb))
+			goto out;
+
+		mdst = br_mdb_get(br, skb);
+		if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only)
+			br_multicast_deliver(mdst, skb);
+		else
+			br_flood_deliver(br, skb);
+	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
 		br_deliver(dst->dst, skb);
 	else
 		br_flood_deliver(br, skb);
 
+out:
 	return NETDEV_TX_OK;
 }
 
@@ -49,6 +60,7 @@ static int br_dev_open(struct net_device *dev)
 	br_features_recompute(br);
 	netif_start_queue(dev);
 	br_stp_enable_bridge(br);
+	br_multicast_open(br);
 
 	return 0;
 }
@@ -59,7 +71,10 @@ static void br_dev_set_multicast_list(struct net_device *dev)
 
 static int br_dev_stop(struct net_device *dev)
 {
-	br_stp_disable_bridge(netdev_priv(dev));
+	struct net_bridge *br = netdev_priv(dev);
+
+	br_stp_disable_bridge(br);
+	br_multicast_stop(br);
 
 	netif_stop_queue(dev);
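The xmit path above stashes per-packet bridge state in skb->cb through the BR_INPUT_SKB_CB() macro that this patch adds to br_private.h. For readers unfamiliar with the idiom, a minimal user-space sketch of the same control-block pattern follows; the struct and field names here are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Stand-in for struct sk_buff: cb is opaque scratch space that each
 * layer may overlay with a private struct, as the bridge does. The
 * overlay must fit within the cb array. */
struct fake_skb {
	char cb[48];
	const char *data;
};

struct input_cb {			/* what BR_INPUT_SKB_CB() overlays */
	void *brdev;
	int igmp;
	int mrouters_only;
};

#define INPUT_CB(skb) ((struct input_cb *)(skb)->cb)

int main(void)
{
	struct fake_skb skb = { .data = "frame" };

	INPUT_CB(&skb)->mrouters_only = 1;	/* producer stage tags the packet */
	printf("mrouters_only=%d\n",		/* consumer stage reads it back */
	       INPUT_CB(&skb)->mrouters_only);
	return 0;
}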
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bc1704ac6cd..d61e6f74112 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -11,6 +11,7 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
@@ -103,51 +104,152 @@ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
-/* called under bridge lock */
-static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+static int deliver_clone(struct net_bridge_port *prev, struct sk_buff *skb,
+			 void (*__packet_hook)(const struct net_bridge_port *p,
+					       struct sk_buff *skb))
+{
+	/* Fetch brdev before cloning: skb must not be dereferenced
+	 * through the NULL result if skb_clone() fails. */
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb) {
+		dev->stats.tx_dropped++;
+		return -ENOMEM;
+	}
+
+	__packet_hook(prev, skb);
+	return 0;
+}
+
+static struct net_bridge_port *maybe_deliver(
+	struct net_bridge_port *prev, struct net_bridge_port *p,
+	struct sk_buff *skb,
 	void (*__packet_hook)(const struct net_bridge_port *p,
 			      struct sk_buff *skb))
 {
+	int err;
+
+	if (!should_deliver(p, skb))
+		return prev;
+
+	if (!prev)
+		goto out;
+
+	err = deliver_clone(prev, skb, __packet_hook);
+	if (err)
+		return ERR_PTR(err);
+
+out:
+	return p;
+}
+
+/* called under bridge lock */
+static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+		     struct sk_buff *skb0,
+		     void (*__packet_hook)(const struct net_bridge_port *p,
+					   struct sk_buff *skb))
+{
 	struct net_bridge_port *p;
 	struct net_bridge_port *prev;
 
 	prev = NULL;
 
 	list_for_each_entry_rcu(p, &br->port_list, list) {
-		if (should_deliver(p, skb)) {
-			if (prev != NULL) {
-				struct sk_buff *skb2;
-
-				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
-					br->dev->stats.tx_dropped++;
-					kfree_skb(skb);
-					return;
-				}
-
-				__packet_hook(prev, skb2);
-			}
-
-			prev = p;
-		}
+		prev = maybe_deliver(prev, p, skb, __packet_hook);
+		if (IS_ERR(prev))
+			goto out;
 	}
 
-	if (prev != NULL) {
+	if (!prev)
+		goto out;
+
+	if (skb0)
+		deliver_clone(prev, skb, __packet_hook);
+	else
 		__packet_hook(prev, skb);
-		return;
-	}
+	return;
 
-	kfree_skb(skb);
+out:
+	if (!skb0)
+		kfree_skb(skb);
 }
 
 /* called with rcu_read_lock */
 void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
 {
-	br_flood(br, skb, __br_deliver);
+	br_flood(br, skb, NULL, __br_deliver);
 }
 
 /* called under bridge lock */
-void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+		      struct sk_buff *skb2)
+{
+	br_flood(br, skb, skb2, __br_forward);
+}
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* called with rcu_read_lock */
+static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
+			       struct sk_buff *skb, struct sk_buff *skb0,
+			       void (*__packet_hook)(
+					const struct net_bridge_port *p,
+					struct sk_buff *skb))
+{
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *port;
+	struct net_bridge_port *lport, *rport;
+	struct net_bridge_port *prev;
+	struct net_bridge_port_group *p;
+	struct hlist_node *rp;
+
+	prev = NULL;
+
+	rp = br->router_list.first;
+	p = mdst ? mdst->ports : NULL;
+	while (p || rp) {
+		lport = p ? p->port : NULL;
+		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
+			     NULL;
+
+		port = (unsigned long)lport > (unsigned long)rport ?
+		       lport : rport;
+
+		prev = maybe_deliver(prev, port, skb, __packet_hook);
+		if (IS_ERR(prev))
+			goto out;
+
+		if ((unsigned long)lport >= (unsigned long)port)
+			p = p->next;
+		if ((unsigned long)rport >= (unsigned long)port)
+			rp = rp->next;
+	}
+
+	if (!prev)
+		goto out;
+
+	if (skb0)
+		deliver_clone(prev, skb, __packet_hook);
+	else
+		__packet_hook(prev, skb);
+	return;
+
+out:
+	if (!skb0)
+		kfree_skb(skb);
+}
+
+/* called with rcu_read_lock */
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+			  struct sk_buff *skb)
+{
+	br_multicast_flood(mdst, skb, NULL, __br_deliver);
+}
+
+/* called with rcu_read_lock */
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+			  struct sk_buff *skb, struct sk_buff *skb2)
 {
-	br_flood(br, skb, __br_forward);
+	br_multicast_flood(mdst, skb, skb2, __br_forward);
 }
+#endif
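br_multicast_flood() above walks two lists in lockstep: the group's port list and the bridge's router-port list, both kept sorted by descending pointer value. Each round it delivers to the larger head and advances every list whose head matched, so a port that appears on both lists receives exactly one copy. A small user-space sketch of that merge-and-skip idea, with integers standing in for port pointers and -1 terminating each list:

#include <stdio.h>

static void deliver(int port)
{
	printf("deliver to %d\n", port);
}

int main(void)
{
	/* stand-ins for the group's port list and the router list,
	 * both sorted descending, as the kernel keeps them */
	int group[]  = { 9, 7, 3, -1 };
	int router[] = { 9, 5, 3, -1 };
	int *g = group, *r = router;

	while (*g >= 0 || *r >= 0) {
		int port = *g > *r ? *g : *r;

		deliver(port);
		if (*g >= port)		/* advance every list that */
			g++;		/* carried this entry, so  */
		if (*r >= port)		/* shared ports are sent   */
			r++;		/* only once               */
	}
	return 0;
}

This prints 9, 7, 5, 3: entries 9 and 3, present on both lists, are delivered once each.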
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a2cbe61f6e6..b6a3872f568 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,6 +147,8 @@ static void del_nbp(struct net_bridge_port *p)
 
 	rcu_assign_pointer(dev->br_port, NULL);
 
+	br_multicast_del_port(p);
+
 	kobject_uevent(&p->kobj, KOBJ_REMOVE);
 	kobject_del(&p->kobj);
 
@@ -206,9 +208,8 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
 
 	br_netfilter_rtable_init(br);
 
-	INIT_LIST_HEAD(&br->age_list);
-
 	br_stp_timer_init(br);
+	br_multicast_init(br);
 
 	return dev;
 }
@@ -260,6 +261,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
 	br_stp_port_timer_init(p);
+	br_multicast_add_port(p);
 
 	return p;
 }
@@ -467,7 +469,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 	return 0;
 }
 
-void br_net_exit(struct net *net)
+void __net_exit br_net_exit(struct net *net)
 {
 	struct net_device *dev;
 	LIST_HEAD(list);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ee1a3682bf..53b39851d87 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -20,9 +20,9 @@
 /* Bridge group multicast address 802.1d (pg 51). */
 const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 
-static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb)
 {
-	struct net_device *indev, *brdev = br->dev;
+	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
 
 	brdev->stats.rx_packets++;
 	brdev->stats.rx_bytes += skb->len;
@@ -30,8 +30,8 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
 	indev = skb->dev;
 	skb->dev = brdev;
 
-	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
-		netif_receive_skb);
+	return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+		       netif_receive_skb);
 }
 
 /* note: already called with rcu_read_lock (preempt_disabled) */
@@ -41,6 +41,7 @@
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 	struct net_bridge *br;
 	struct net_bridge_fdb_entry *dst;
+	struct net_bridge_mdb_entry *mdst;
 	struct sk_buff *skb2;
 
 	if (!p || p->state == BR_STATE_DISABLED)
@@ -50,9 +51,15 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	br = p->br;
 	br_fdb_update(br, p, eth_hdr(skb)->h_source);
 
+	if (is_multicast_ether_addr(dest) &&
+	    br_multicast_rcv(br, p, skb))
+		goto drop;
+
 	if (p->state == BR_STATE_LEARNING)
 		goto drop;
 
+	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
+
 	/* The packet skb2 goes to the local host (NULL to skip). */
 	skb2 = NULL;
 
@@ -62,27 +69,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	dst = NULL;
 
 	if (is_multicast_ether_addr(dest)) {
+		mdst = br_mdb_get(br, skb);
+		if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only) {
+			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			    br_multicast_is_router(br))
+				skb2 = skb;
+			br_multicast_forward(mdst, skb, skb2);
+			skb = NULL;
+			if (!skb2)
+				goto out;
+		} else
+			skb2 = skb;
+
 		br->dev->stats.multicast++;
-		skb2 = skb;
 	} else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
 		skb2 = skb;
 		/* Do not forward the packet since it's local. */
 		skb = NULL;
 	}
 
-	if (skb2 == skb)
-		skb2 = skb_clone(skb, GFP_ATOMIC);
-
-	if (skb2)
-		br_pass_frame_up(br, skb2);
-
 	if (skb) {
 		if (dst)
 			br_forward(dst->dst, skb);
 		else
-			br_flood_forward(br, skb);
+			br_flood_forward(br, skb, skb2);
 	}
 
+	if (skb2)
+		return br_pass_frame_up(skb2);
+
 out:
 	return 0;
 
 drop:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
new file mode 100644
index 00000000000..2559fb53983
--- /dev/null
+++ b/net/bridge/br_multicast.c
@@ -0,0 +1,1304 @@
+/*
+ * Bridge multicast support.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/igmp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <net/ip.h>
+
+#include "br_private.h"
+
+static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+{
+	return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
+}
+
+static struct net_bridge_mdb_entry *__br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
+{
+	struct net_bridge_mdb_entry *mp;
+	struct hlist_node *p;
+
+	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+		if (dst == mp->addr)
+			return mp;
+	}
+
+	return NULL;
+}
+
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, __be32 dst)
+{
+	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
+struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+					struct sk_buff *skb)
+{
+	struct net_bridge_mdb_htable *mdb = br->mdb;
+
+	if (!mdb || br->multicast_disabled)
+		return NULL;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		if (BR_INPUT_SKB_CB(skb)->igmp)
+			break;
+		return br_mdb_ip_get(mdb, ip_hdr(skb)->daddr);
+	}
+
+	return NULL;
+}
+
+static void br_mdb_free(struct rcu_head *head)
+{
+	struct net_bridge_mdb_htable *mdb =
+		container_of(head, struct net_bridge_mdb_htable, rcu);
+	struct net_bridge_mdb_htable *old = mdb->old;
+
+	mdb->old = NULL;
+	kfree(old->mhash);
+	kfree(old);
+}
+
+static int br_mdb_copy(struct net_bridge_mdb_htable *new,
+		       struct net_bridge_mdb_htable *old,
+		       int elasticity)
+{
+	struct net_bridge_mdb_entry *mp;
+	struct hlist_node *p;
+	int maxlen;
+	int len;
+	int i;
+
+	for (i = 0; i < old->max; i++)
+		hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
+			hlist_add_head(&mp->hlist[new->ver],
+				       &new->mhash[br_ip_hash(new, mp->addr)]);
+
+	if (!elasticity)
+		return 0;
+
+	maxlen = 0;
+	for (i = 0; i < new->max; i++) {
+		len = 0;
+		hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
+			len++;
+		if (len > maxlen)
+			maxlen = len;
+	}
+
+	return maxlen > elasticity ? -EINVAL : 0;
+}
+
+static void br_multicast_free_pg(struct rcu_head *head)
+{
+	struct net_bridge_port_group *p =
+		container_of(head, struct net_bridge_port_group, rcu);
+
+	kfree(p);
+}
+
+static void br_multicast_free_group(struct rcu_head *head)
+{
+	struct net_bridge_mdb_entry *mp =
+		container_of(head, struct net_bridge_mdb_entry, rcu);
+
+	kfree(mp);
+}
+
+static void br_multicast_group_expired(unsigned long data)
+{
+	struct net_bridge_mdb_entry *mp = (void *)data;
+	struct net_bridge *br = mp->br;
+	struct net_bridge_mdb_htable *mdb;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || timer_pending(&mp->timer))
+		goto out;
+
+	if (!hlist_unhashed(&mp->mglist))
+		hlist_del_init(&mp->mglist);
+
+	if (mp->ports)
+		goto out;
+
+	mdb = br->mdb;
+	hlist_del_rcu(&mp->hlist[mdb->ver]);
+	mdb->size--;
+
+	del_timer(&mp->query_timer);
+	call_rcu_bh(&mp->rcu, br_multicast_free_group);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_multicast_del_pg(struct net_bridge *br,
+				struct net_bridge_port_group *pg)
+{
+	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_port_group **pp;
+
+	mp = br_mdb_ip_get(mdb, pg->addr);
+	if (WARN_ON(!mp))
+		return;
+
+	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+		if (p != pg)
+			continue;
+
+		*pp = p->next;
+		hlist_del_init(&p->mglist);
+		del_timer(&p->timer);
+		del_timer(&p->query_timer);
+		call_rcu_bh(&p->rcu, br_multicast_free_pg);
+
+		if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+		    netif_running(br->dev))
+			mod_timer(&mp->timer, jiffies);
+
+		return;
+	}
+
+	WARN_ON(1);
+}
+
+static void br_multicast_port_group_expired(unsigned long data)
+{
+	struct net_bridge_port_group *pg = (void *)data;
+	struct net_bridge *br = pg->port->br;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
+	    hlist_unhashed(&pg->mglist))
+		goto out;
+
+	br_multicast_del_pg(br, pg);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
+			 int elasticity)
+{
+	struct net_bridge_mdb_htable *old = *mdbp;
+	struct net_bridge_mdb_htable *mdb;
+	int err;
+
+	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
+	if (!mdb)
+		return -ENOMEM;
+
+	mdb->max = max;
+	mdb->old = old;
+
+	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
+	if (!mdb->mhash) {
+		kfree(mdb);
+		return -ENOMEM;
+	}
+
+	mdb->size = old ? old->size : 0;
+	mdb->ver = old ? old->ver ^ 1 : 0;
+
+	if (!old || elasticity)
+		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
+	else
+		mdb->secret = old->secret;
+
+	if (!old)
+		goto out;
+
+	err = br_mdb_copy(mdb, old, elasticity);
+	if (err) {
+		kfree(mdb->mhash);
+		kfree(mdb);
+		return err;
+	}
+
+	call_rcu_bh(&mdb->rcu, br_mdb_free);
+
+out:
+	rcu_assign_pointer(*mdbp, mdb);
+
+	return 0;
+}
+
+static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
+						__be32 group)
+{
+	struct sk_buff *skb;
+	struct igmphdr *ih;
+	struct ethhdr *eth;
+	struct iphdr *iph;
+
+	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
+						 sizeof(*ih) + 4);
+	if (!skb)
+		goto out;
+
+	skb->protocol = htons(ETH_P_IP);
+
+	skb_reset_mac_header(skb);
+	eth = eth_hdr(skb);
+
+	memcpy(eth->h_source, br->dev->dev_addr, 6);
+	eth->h_dest[0] = 1;
+	eth->h_dest[1] = 0;
+	eth->h_dest[2] = 0x5e;
+	eth->h_dest[3] = 0;
+	eth->h_dest[4] = 0;
+	eth->h_dest[5] = 1;
+	eth->h_proto = htons(ETH_P_IP);
+	skb_put(skb, sizeof(*eth));
+
+	skb_set_network_header(skb, skb->len);
+	iph = ip_hdr(skb);
+
+	iph->version = 4;
+	iph->ihl = 6;
+	iph->tos = 0xc0;
+	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
+	iph->id = 0;
+	iph->frag_off = htons(IP_DF);
+	iph->ttl = 1;
+	iph->protocol = IPPROTO_IGMP;
+	iph->saddr = 0;
+	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+	((u8 *)&iph[1])[0] = IPOPT_RA;
+	((u8 *)&iph[1])[1] = 4;
+	((u8 *)&iph[1])[2] = 0;
+	((u8 *)&iph[1])[3] = 0;
+	ip_send_check(iph);
+	skb_put(skb, 24);
+
+	skb_set_transport_header(skb, skb->len);
+	ih = igmp_hdr(skb);
+	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
+	ih->code = (group ? br->multicast_last_member_interval :
+			    br->multicast_query_response_interval) /
+		   (HZ / IGMP_TIMER_SCALE);
+	ih->group = group;
+	ih->csum = 0;
+	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
+	skb_put(skb, sizeof(*ih));
+
+	__skb_pull(skb, sizeof(*eth));
+
+out:
+	return skb;
+}
+
+static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
+{
+	struct net_bridge *br = mp->br;
+	struct sk_buff *skb;
+
+	skb = br_multicast_alloc_query(br, mp->addr);
+	if (!skb)
+		goto timer;
+
+	netif_rx(skb);
+
+timer:
+	if (++mp->queries_sent < br->multicast_last_member_count)
+		mod_timer(&mp->query_timer,
+			  jiffies + br->multicast_last_member_interval);
+}
+
+static void br_multicast_group_query_expired(unsigned long data)
+{
+	struct net_bridge_mdb_entry *mp = (void *)data;
+	struct net_bridge *br = mp->br;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	    mp->queries_sent >= br->multicast_last_member_count)
+		goto out;
+
+	br_multicast_send_group_query(mp);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
+{
+	struct net_bridge_port *port = pg->port;
+	struct net_bridge *br = port->br;
+	struct sk_buff *skb;
+
+	skb = br_multicast_alloc_query(br, pg->addr);
+	if (!skb)
+		goto timer;
+
+	br_deliver(port, skb);
+
+timer:
+	if (++pg->queries_sent < br->multicast_last_member_count)
+		mod_timer(&pg->query_timer,
+			  jiffies + br->multicast_last_member_interval);
+}
+
+static void br_multicast_port_group_query_expired(unsigned long data)
+{
+	struct net_bridge_port_group *pg = (void *)data;
+	struct net_bridge_port *port = pg->port;
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
+	    pg->queries_sent >= br->multicast_last_member_count)
+		goto out;
+
+	br_multicast_send_port_group_query(pg);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+static struct net_bridge_mdb_entry *br_multicast_get_group(
+	struct net_bridge *br, struct net_bridge_port *port, __be32 group,
+	int hash)
+{
+	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct hlist_node *p;
+	unsigned count = 0;
+	unsigned max;
+	int elasticity;
+	int err;
+
+	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+		count++;
+		if (unlikely(group == mp->addr)) {
+			return mp;
+		}
+	}
+
+	elasticity = 0;
+	max = mdb->max;
+
+	if (unlikely(count > br->hash_elasticity && count)) {
+		if (net_ratelimit())
+			printk(KERN_INFO "%s: Multicast hash table "
+			       "chain limit reached: %s\n",
+			       br->dev->name, port ? port->dev->name :
+						     br->dev->name);
+
+		elasticity = br->hash_elasticity;
+	}
+
+	if (mdb->size >= max) {
+		max *= 2;
+		if (unlikely(max >= br->hash_max)) {
+			printk(KERN_WARNING "%s: Multicast hash table maximum "
+			       "reached, disabling snooping: %s, %d\n",
+			       br->dev->name, port ? port->dev->name :
+						     br->dev->name,
+			       max);
+			err = -E2BIG;
+disable:
+			br->multicast_disabled = 1;
+			goto err;
+		}
+	}
+
+	if (max > mdb->max || elasticity) {
+		if (mdb->old) {
+			if (net_ratelimit())
+				printk(KERN_INFO "%s: Multicast hash table "
+				       "on fire: %s\n",
+				       br->dev->name, port ? port->dev->name :
+							     br->dev->name);
+			err = -EEXIST;
+			goto err;
+		}
+
+		err = br_mdb_rehash(&br->mdb, max, elasticity);
+		if (err) {
+			printk(KERN_WARNING "%s: Cannot rehash multicast "
+			       "hash table, disabling snooping: "
+			       "%s, %d, %d\n",
+			       br->dev->name, port ? port->dev->name :
+						     br->dev->name,
+			       mdb->size, err);
+			goto disable;
+		}
+
+		err = -EAGAIN;
+		goto err;
+	}
+
+	return NULL;
+
+err:
+	mp = ERR_PTR(err);
+	return mp;
+}
+
+static struct net_bridge_mdb_entry *br_multicast_new_group(
+	struct net_bridge *br, struct net_bridge_port *port, __be32 group)
+{
+	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_entry *mp;
+	int hash;
+
+	if (!mdb) {
+		if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
+			return NULL;
+		goto rehash;
+	}
+
+	hash = br_ip_hash(mdb, group);
+	mp = br_multicast_get_group(br, port, group, hash);
+	switch (PTR_ERR(mp)) {
+	case 0:
+		break;
+
+	case -EAGAIN:
+rehash:
+		mdb = br->mdb;
+		hash = br_ip_hash(mdb, group);
+		break;
+
+	default:
+		goto out;
+	}
+
+	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
+	if (unlikely(!mp))
+		goto out;
+
+	mp->br = br;
+	mp->addr = group;
+	setup_timer(&mp->timer, br_multicast_group_expired,
+		    (unsigned long)mp);
+	setup_timer(&mp->query_timer, br_multicast_group_query_expired,
+		    (unsigned long)mp);
+
+	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
+	mdb->size++;
+
+out:
+	return mp;
+}
+
+static int br_multicast_add_group(struct net_bridge *br,
+				  struct net_bridge_port *port, __be32 group)
+{
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_port_group **pp;
+	unsigned long now = jiffies;
+	int err;
+
+	if (ipv4_is_local_multicast(group))
+		return 0;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) ||
+	    (port && port->state == BR_STATE_DISABLED))
+		goto out;
+
+	mp = br_multicast_new_group(br, port, group);
+	err = PTR_ERR(mp);
+	if (unlikely(IS_ERR(mp) || !mp))
+		goto err;
+
+	if (!port) {
+		hlist_add_head(&mp->mglist, &br->mglist);
+		mod_timer(&mp->timer, now + br->multicast_membership_interval);
+		goto out;
+	}
+
+	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+		if (p->port == port)
+			goto found;
+		if ((unsigned long)p->port < (unsigned long)port)
+			break;
+	}
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	err = -ENOMEM;
+	if (unlikely(!p))
+		goto err;
+
+	p->addr = group;
+	p->port = port;
+	p->next = *pp;
+	hlist_add_head(&p->mglist, &port->mglist);
+	setup_timer(&p->timer, br_multicast_port_group_expired,
+		    (unsigned long)p);
+	setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
+		    (unsigned long)p);
+
+	rcu_assign_pointer(*pp, p);
+
+found:
+	mod_timer(&p->timer, now + br->multicast_membership_interval);
+out:
+	err = 0;
+
+err:
+	spin_unlock(&br->multicast_lock);
+	return err;
+}
+
+static void br_multicast_router_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	if (port->multicast_router != 1 ||
+	    timer_pending(&port->multicast_router_timer) ||
+	    hlist_unhashed(&port->rlist))
+		goto out;
+
+	hlist_del_init_rcu(&port->rlist);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_multicast_local_router_expired(unsigned long data)
+{
+}
+
+static void br_multicast_send_query(struct net_bridge *br,
+				    struct net_bridge_port *port, u32 sent)
+{
+	unsigned long time;
+	struct sk_buff *skb;
+
+	if (!netif_running(br->dev) || br->multicast_disabled ||
+	    timer_pending(&br->multicast_querier_timer))
+		return;
+
+	skb = br_multicast_alloc_query(br, 0);
+	if (!skb)
+		goto timer;
+
+	if (port) {
+		__skb_push(skb, sizeof(struct ethhdr));
+		skb->dev = port->dev;
+		NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+			dev_queue_xmit);
+	} else
+		netif_rx(skb);
+
+timer:
+	time = jiffies;
+	time += sent < br->multicast_startup_query_count ?
+		br->multicast_startup_query_interval :
+		br->multicast_query_interval;
+	mod_timer(port ? &port->multicast_query_timer :
+			 &br->multicast_query_timer, time);
+}
+
+static void br_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	if (port && (port->state == BR_STATE_DISABLED ||
+		     port->state == BR_STATE_BLOCKING))
+		goto out;
+
+	if (port->multicast_startup_queries_sent <
+	    br->multicast_startup_query_count)
+		port->multicast_startup_queries_sent++;
+
+	br_multicast_send_query(port->br, port,
+				port->multicast_startup_queries_sent);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+void br_multicast_add_port(struct net_bridge_port *port)
+{
+	port->multicast_router = 1;
+
+	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
+		    (unsigned long)port);
+	setup_timer(&port->multicast_query_timer,
+		    br_multicast_port_query_expired, (unsigned long)port);
+}
+
+void br_multicast_del_port(struct net_bridge_port *port)
+{
+	del_timer_sync(&port->multicast_router_timer);
+}
+
+static void __br_multicast_enable_port(struct net_bridge_port *port)
+{
+	port->multicast_startup_queries_sent = 0;
+
+	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
+	    del_timer(&port->multicast_query_timer))
+		mod_timer(&port->multicast_query_timer, jiffies);
+}
+
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	if (br->multicast_disabled || !netif_running(br->dev))
+		goto out;
+
+	__br_multicast_enable_port(port);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+void br_multicast_disable_port(struct net_bridge_port *port)
+{
+	struct net_bridge *br = port->br;
+	struct net_bridge_port_group *pg;
+	struct hlist_node *p, *n;
+
+	spin_lock(&br->multicast_lock);
+	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
+		br_multicast_del_pg(br, pg);
+
+	if (!hlist_unhashed(&port->rlist))
+		hlist_del_init_rcu(&port->rlist);
+	del_timer(&port->multicast_router_timer);
+	del_timer(&port->multicast_query_timer);
+	spin_unlock(&br->multicast_lock);
+}
+
+static int br_multicast_igmp3_report(struct net_bridge *br,
+				     struct net_bridge_port *port,
+				     struct sk_buff *skb)
+{
+	struct igmpv3_report *ih;
+	struct igmpv3_grec *grec;
+	int i;
+	int len;
+	int num;
+	int type;
+	int err = 0;
+	__be32 group;
+
+	if (!pskb_may_pull(skb, sizeof(*ih)))
+		return -EINVAL;
+
+	ih = igmpv3_report_hdr(skb);
+	num = ntohs(ih->ngrec);
+	len = sizeof(*ih);
+
+	for (i = 0; i < num; i++) {
+		len += sizeof(*grec);
+		if (!pskb_may_pull(skb, len))
+			return -EINVAL;
+
+		/* The record just validated starts sizeof(*grec) back. */
+		grec = (void *)(skb->data + len - sizeof(*grec));
+		group = grec->grec_mca;
+		type = grec->grec_type;
+
+		/* grec_nsrcs is big-endian on the wire. */
+		len += ntohs(grec->grec_nsrcs) * 4;
+		if (!pskb_may_pull(skb, len))
+			return -EINVAL;
+
+		/* We treat this as an IGMPv2 report for now. */
+		switch (type) {
+		case IGMPV3_MODE_IS_INCLUDE:
+		case IGMPV3_MODE_IS_EXCLUDE:
+		case IGMPV3_CHANGE_TO_INCLUDE:
+		case IGMPV3_CHANGE_TO_EXCLUDE:
+		case IGMPV3_ALLOW_NEW_SOURCES:
+		case IGMPV3_BLOCK_OLD_SOURCES:
+			break;
+
+		default:
+			continue;
+		}
+
+		err = br_multicast_add_group(br, port, group);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+static void br_multicast_add_router(struct net_bridge *br,
+				    struct net_bridge_port *port)
+{
+	struct hlist_node *p;
+	struct hlist_node **h;
+
+	for (h = &br->router_list.first;
+	     (p = *h) &&
+	     (unsigned long)container_of(p, struct net_bridge_port, rlist) >
+	     (unsigned long)port;
+	     h = &p->next)
+		;
+
+	port->rlist.pprev = h;
+	port->rlist.next = p;
+	rcu_assign_pointer(*h, &port->rlist);
+	if (p)
+		p->pprev = &port->rlist.next;
+}
+
+static void br_multicast_mark_router(struct net_bridge *br,
+				     struct net_bridge_port *port)
+{
+	unsigned long now = jiffies;
+
+	if (!port) {
+		if (br->multicast_router == 1)
+			mod_timer(&br->multicast_router_timer,
+				  now + br->multicast_querier_interval);
+		return;
+	}
+
+	if (port->multicast_router != 1)
+		return;
+
+	if (!hlist_unhashed(&port->rlist))
+		goto timer;
+
+	br_multicast_add_router(br, port);
+
+timer:
+	mod_timer(&port->multicast_router_timer,
+		  now + br->multicast_querier_interval);
+}
+
+static void br_multicast_query_received(struct net_bridge *br,
+					struct net_bridge_port *port,
+					__be32 saddr)
+{
+	if (saddr)
+		mod_timer(&br->multicast_querier_timer,
+			  jiffies + br->multicast_querier_interval);
+	else if (timer_pending(&br->multicast_querier_timer))
+		return;
+
+	br_multicast_mark_router(br, port);
+}
+
+static int br_multicast_query(struct net_bridge *br,
+			      struct net_bridge_port *port,
+			      struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	struct igmphdr *ih = igmp_hdr(skb);
+	struct net_bridge_mdb_entry *mp;
+	struct igmpv3_query *ih3;
+	struct net_bridge_port_group *p;
+	struct net_bridge_port_group **pp;
+	unsigned long max_delay;
+	unsigned long now = jiffies;
+	__be32 group;
+	int err = 0;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) ||
+	    (port && port->state == BR_STATE_DISABLED))
+		goto out;
+
+	br_multicast_query_received(br, port, iph->saddr);
+
+	group = ih->group;
+
+	if (skb->len == sizeof(*ih)) {
+		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
+
+		if (!max_delay) {
+			max_delay = 10 * HZ;
+			group = 0;
+		}
+	} else {
+		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
+			/* must not return with the lock held */
+			err = -EINVAL;
+			goto out;
+		}
+
+		ih3 = igmpv3_query_hdr(skb);
+		if (ih3->nsrcs)
+			goto out;
+
+		max_delay = ih3->code ?
+			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
+	}
+
+	if (!group)
+		goto out;
+
+	mp = br_mdb_ip_get(br->mdb, group);
+	if (!mp)
+		goto out;
+
+	max_delay *= br->multicast_last_member_count;
+
+	if (!hlist_unhashed(&mp->mglist) &&
+	    (timer_pending(&mp->timer) ?
+	     time_after(mp->timer.expires, now + max_delay) :
+	     try_to_del_timer_sync(&mp->timer) >= 0))
+		mod_timer(&mp->timer, now + max_delay);
+
+	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+		if (timer_pending(&p->timer) ?
+		    time_after(p->timer.expires, now + max_delay) :
+		    try_to_del_timer_sync(&p->timer) >= 0)
+			mod_timer(&p->timer, now + max_delay);
+	}
+
+out:
+	spin_unlock(&br->multicast_lock);
+	return err;
+}
+
+static void br_multicast_leave_group(struct net_bridge *br,
+				     struct net_bridge_port *port,
+				     __be32 group)
+{
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port_group *p;
+	unsigned long now;
+	unsigned long time;
+
+	if (ipv4_is_local_multicast(group))
+		return;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) ||
+	    (port && port->state == BR_STATE_DISABLED) ||
+	    timer_pending(&br->multicast_querier_timer))
+		goto out;
+
+	mdb = br->mdb;
+	mp = br_mdb_ip_get(mdb, group);
+	if (!mp)
+		goto out;
+
+	now = jiffies;
+	time = now + br->multicast_last_member_count *
+		     br->multicast_last_member_interval;
+
+	if (!port) {
+		if (!hlist_unhashed(&mp->mglist) &&
+		    (timer_pending(&mp->timer) ?
+		     time_after(mp->timer.expires, time) :
+		     try_to_del_timer_sync(&mp->timer) >= 0)) {
+			mod_timer(&mp->timer, time);
+
+			mp->queries_sent = 0;
+			mod_timer(&mp->query_timer, now);
+		}
+
+		goto out;
+	}
+
+	for (p = mp->ports; p; p = p->next) {
+		if (p->port != port)
+			continue;
+
+		if (!hlist_unhashed(&p->mglist) &&
+		    (timer_pending(&p->timer) ?
+		     time_after(p->timer.expires, time) :
+		     try_to_del_timer_sync(&p->timer) >= 0)) {
+			mod_timer(&p->timer, time);
+
+			p->queries_sent = 0;
+			mod_timer(&p->query_timer, now);
+		}
+
+		break;
+	}
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
+static int br_multicast_ipv4_rcv(struct net_bridge *br,
+				 struct net_bridge_port *port,
+				 struct sk_buff *skb)
+{
+	struct sk_buff *skb2 = skb;
+	struct iphdr *iph;
+	struct igmphdr *ih;
+	unsigned len;
+	unsigned offset;
+	int err;
+
+	BR_INPUT_SKB_CB(skb)->igmp = 0;
+	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
+
+	/* We treat OOM as packet loss for now. */
+	if (!pskb_may_pull(skb, sizeof(*iph)))
+		return -EINVAL;
+
+	iph = ip_hdr(skb);
+
+	if (iph->ihl < 5 || iph->version != 4)
+		return -EINVAL;
+
+	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
+		return -EINVAL;
+
+	iph = ip_hdr(skb);
+
+	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+		return -EINVAL;
+
+	if (iph->protocol != IPPROTO_IGMP)
+		return 0;
+
+	len = ntohs(iph->tot_len);
+	if (skb->len < len || len < ip_hdrlen(skb))
+		return -EINVAL;
+
+	if (skb->len > len) {
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2)
+			return -ENOMEM;
+
+		err = pskb_trim_rcsum(skb2, len);
+		if (err)
+			goto err_out;	/* don't leak the clone */
+	}
+
+	len -= ip_hdrlen(skb2);
+	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
+	__skb_pull(skb2, offset);
+	skb_reset_transport_header(skb2);
+
+	err = -EINVAL;
+	if (!pskb_may_pull(skb2, sizeof(*ih)))
+		goto out;
+
+	iph = ip_hdr(skb2);
+
+	switch (skb2->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!csum_fold(skb2->csum))
+			break;
+		/* fall through */
+	case CHECKSUM_NONE:
+		skb2->csum = 0;
+		if (skb_checksum_complete(skb2))
+			goto out;	/* err is still -EINVAL */
+	}
+
+	err = 0;
+
+	BR_INPUT_SKB_CB(skb)->igmp = 1;
+	ih = igmp_hdr(skb2);
+
+	switch (ih->type) {
+	case IGMP_HOST_MEMBERSHIP_REPORT:
+	case IGMPV2_HOST_MEMBERSHIP_REPORT:
+		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+		err = br_multicast_add_group(br, port, ih->group);
+		break;
+	case IGMPV3_HOST_MEMBERSHIP_REPORT:
+		err = br_multicast_igmp3_report(br, port, skb2);
+		break;
+	case IGMP_HOST_MEMBERSHIP_QUERY:
+		err = br_multicast_query(br, port, skb2);
+		break;
+	case IGMP_HOST_LEAVE_MESSAGE:
+		br_multicast_leave_group(br, port, ih->group);
+		break;
+	}
+
+out:
+	__skb_push(skb2, offset);
+err_out:
+	if (skb2 != skb)
+		kfree_skb(skb2);
+	return err;
+}
+
+int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
+		     struct sk_buff *skb)
+{
+	if (br->multicast_disabled)
+		return 0;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return br_multicast_ipv4_rcv(br, port, skb);
+	}
+
+	return 0;
+}
+
+static void br_multicast_query_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	spin_lock(&br->multicast_lock);
+	if (br->multicast_startup_queries_sent <
+	    br->multicast_startup_query_count)
+		br->multicast_startup_queries_sent++;
+
+	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+
+	spin_unlock(&br->multicast_lock);
+}
+
+void br_multicast_init(struct net_bridge *br)
+{
+	br->hash_elasticity = 4;
+	br->hash_max = 512;
+
+	br->multicast_router = 1;
+	br->multicast_last_member_count = 2;
+	br->multicast_startup_query_count = 2;
+
+	br->multicast_last_member_interval = HZ;
+	br->multicast_query_response_interval = 10 * HZ;
+	br->multicast_startup_query_interval = 125 * HZ / 4;
+	br->multicast_query_interval = 125 * HZ;
+	br->multicast_querier_interval = 255 * HZ;
+	br->multicast_membership_interval = 260 * HZ;
+
+	spin_lock_init(&br->multicast_lock);
+	setup_timer(&br->multicast_router_timer,
+		    br_multicast_local_router_expired, 0);
+	setup_timer(&br->multicast_querier_timer,
+		    br_multicast_local_router_expired, 0);
+	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+		    (unsigned long)br);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+	br->multicast_startup_queries_sent = 0;
+
+	if (br->multicast_disabled)
+		return;
+
+	mod_timer(&br->multicast_query_timer, jiffies);
+}
+
+void br_multicast_stop(struct net_bridge *br)
+{
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct hlist_node *p, *n;
+	u32 ver;
+	int i;
+
+	del_timer_sync(&br->multicast_router_timer);
+	del_timer_sync(&br->multicast_querier_timer);
+	del_timer_sync(&br->multicast_query_timer);
+
+	spin_lock_bh(&br->multicast_lock);
+	mdb = br->mdb;
+	if (!mdb)
+		goto out;
+
+	br->mdb = NULL;
+
+	ver = mdb->ver;
+	for (i = 0; i < mdb->max; i++) {
+		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
+					  hlist[ver]) {
+			del_timer(&mp->timer);
+			del_timer(&mp->query_timer);
+			call_rcu_bh(&mp->rcu, br_multicast_free_group);
+		}
+	}
+
+	if (mdb->old) {
+		spin_unlock_bh(&br->multicast_lock);
+		synchronize_rcu_bh();
+		spin_lock_bh(&br->multicast_lock);
+		WARN_ON(mdb->old);
+	}
+
+	mdb->old = mdb;
+	call_rcu_bh(&mdb->rcu, br_mdb_free);
+
+out:
+	spin_unlock_bh(&br->multicast_lock);
+}
+
+int br_multicast_set_router(struct net_bridge *br, unsigned long val)
+{
+	int err = -ENOENT;
+
+	spin_lock_bh(&br->multicast_lock);
+	if (!netif_running(br->dev))
+		goto unlock;
+
+	switch (val) {
+	case 0:
+	case 2:
+		del_timer(&br->multicast_router_timer);
+		/* fall through */
+	case 1:
+		br->multicast_router = val;
+		err = 0;
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+unlock:
+	spin_unlock_bh(&br->multicast_lock);
+
+	return err;
+}
+
+int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
+{
+	struct net_bridge *br = p->br;
+	int err = -ENOENT;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
+		goto unlock;
+
+	switch (val) {
+	case 0:
+	case 1:
+	case 2:
+		p->multicast_router = val;
+		err = 0;
+
+		if (val < 2 && !hlist_unhashed(&p->rlist))
+			hlist_del_init_rcu(&p->rlist);
+
+		if (val == 1)
+			break;
+
+		del_timer(&p->multicast_router_timer);
+
+		if (val == 0)
+			break;
+
+		br_multicast_add_router(br, p);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+unlock:
+	spin_unlock(&br->multicast_lock);
+
+	return err;
+}
+
+int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+{
+	struct net_bridge_port *port;
+	int err = -ENOENT;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev))
+		goto unlock;
+
+	err = 0;
+	if (br->multicast_disabled == !val)
+		goto unlock;
+
+	br->multicast_disabled = !val;
+	if (br->multicast_disabled)
+		goto unlock;
+
+	if (br->mdb) {
+		if (br->mdb->old) {
+			err = -EEXIST;
+rollback:
+			br->multicast_disabled = !!val;
+			goto unlock;
+		}
+
+		err = br_mdb_rehash(&br->mdb, br->mdb->max,
+				    br->hash_elasticity);
+		if (err)
+			goto rollback;
+	}
+
+	br_multicast_open(br);
+	list_for_each_entry(port, &br->port_list, list) {
+		if (port->state == BR_STATE_DISABLED ||
+		    port->state == BR_STATE_BLOCKING)
+			continue;
+
+		__br_multicast_enable_port(port);
+	}
+
+unlock:
+	spin_unlock(&br->multicast_lock);
+
+	return err;
+}
+
+int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
+{
+	int err = -ENOENT;
+	u32 old;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev))
+		goto unlock;
+
+	err = -EINVAL;
+	if (!is_power_of_2(val))
+		goto unlock;
+	if (br->mdb && val < br->mdb->size)
+		goto unlock;
+
+	err = 0;
+
+	old = br->hash_max;
+	br->hash_max = val;
+
+	if (br->mdb) {
+		if (br->mdb->old) {
+			err = -EEXIST;
+rollback:
+			br->hash_max = old;
+			goto unlock;
+		}
+
+		err = br_mdb_rehash(&br->mdb, br->hash_max,
+				    br->hash_elasticity);
+		if (err)
+			goto rollback;
+	}
+
+unlock:
+	spin_unlock(&br->multicast_lock);
+
+	return err;
+}
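br_ip_hash() at the top of this file reduces a jhash of the group address to a bucket index with "& (mdb->max - 1)", which is only a valid modulo when the table size is a power of two; that is why br_multicast_set_hash_max() above rejects other values. The per-table random secret makes it harder for a sender to flood one hash chain deliberately. A toy user-space illustration, with a stand-in mixing function rather than the kernel's jhash:

#include <stdint.h>
#include <stdio.h>

/* toy stand-in for jhash_1word(); only the masking step matters here */
static uint32_t toy_hash(uint32_t secret, uint32_t ip)
{
	uint32_t h = ip ^ secret;

	h ^= h >> 16;
	h *= 0x45d9f3b;
	h ^= h >> 16;
	return h;
}

int main(void)
{
	uint32_t secret = 0xdeadbeef;	/* like mdb->secret, randomly drawn */
	uint32_t max = 512;		/* like hash_max: must be a power of two */
	uint32_t group = 0xe0000101;	/* 224.0.1.1 */

	/* max - 1 == 0x1ff, so this is hash % max without a division */
	printf("bucket = %u\n", toy_hash(secret, group) & (max - 1));
	return 0;
}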
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2114e45682e..fef0384e3c0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -57,6 +57,41 @@ struct net_bridge_fdb_entry
 	unsigned char			is_static;
 };
 
+struct net_bridge_port_group {
+	struct net_bridge_port		*port;
+	struct net_bridge_port_group	*next;
+	struct hlist_node		mglist;
+	struct rcu_head			rcu;
+	struct timer_list		timer;
+	struct timer_list		query_timer;
+	__be32				addr;
+	u32				queries_sent;
+};
+
+struct net_bridge_mdb_entry
+{
+	struct hlist_node		hlist[2];
+	struct hlist_node		mglist;
+	struct net_bridge		*br;
+	struct net_bridge_port_group	*ports;
+	struct rcu_head			rcu;
+	struct timer_list		timer;
+	struct timer_list		query_timer;
+	__be32				addr;
+	u32				queries_sent;
+};
+
+struct net_bridge_mdb_htable
+{
+	struct hlist_head		*mhash;
+	struct rcu_head			rcu;
+	struct net_bridge_mdb_htable	*old;
+	u32				size;
+	u32				max;
+	u32				secret;
+	u32				ver;
+};
+
 struct net_bridge_port
 {
 	struct net_bridge		*br;
@@ -84,6 +119,15 @@ struct net_bridge_port
 
 	unsigned long			flags;
 #define BR_HAIRPIN_MODE		0x00000001
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	u32				multicast_startup_queries_sent;
+	unsigned char			multicast_router;
+	struct timer_list		multicast_router_timer;
+	struct timer_list		multicast_query_timer;
+	struct hlist_head		mglist;
+	struct hlist_node		rlist;
+#endif
 };
 
 struct net_bridge
@@ -93,7 +137,6 @@ struct net_bridge
 	struct net_device		*dev;
 	spinlock_t			hash_lock;
 	struct hlist_head		hash[BR_HASH_SIZE];
-	struct list_head		age_list;
 	unsigned long			feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct rtable			fake_rtable;
@@ -125,6 +168,35 @@ struct net_bridge
 	unsigned char			topology_change;
 	unsigned char			topology_change_detected;
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	unsigned char			multicast_router;
+
+	u8				multicast_disabled:1;
+
+	u32				hash_elasticity;
+	u32				hash_max;
+
+	u32				multicast_last_member_count;
+	u32				multicast_startup_queries_sent;
+	u32				multicast_startup_query_count;
+
+	unsigned long			multicast_last_member_interval;
+	unsigned long			multicast_membership_interval;
+	unsigned long			multicast_querier_interval;
+	unsigned long			multicast_query_interval;
+	unsigned long			multicast_query_response_interval;
+	unsigned long			multicast_startup_query_interval;
+
+	spinlock_t			multicast_lock;
+	struct net_bridge_mdb_htable	*mdb;
+	struct hlist_head		router_list;
+	struct hlist_head		mglist;
+
+	struct timer_list		multicast_router_timer;
+	struct timer_list		multicast_querier_timer;
+	struct timer_list		multicast_query_timer;
+#endif
+
 	struct timer_list		hello_timer;
 	struct timer_list		tcn_timer;
 	struct timer_list		topology_change_timer;
@@ -132,6 +204,14 @@ struct net_bridge
 	struct kobject			*ifobj;
 };
 
+struct br_input_skb_cb {
+	struct net_device *brdev;
+	int igmp;
+	int mrouters_only;
+};
+
+#define BR_INPUT_SKB_CB(__skb)	((struct br_input_skb_cb *)(__skb)->cb)
+
 extern struct notifier_block br_device_notifier;
 extern const u8 br_group_address[ETH_ALEN];
 
@@ -175,7 +255,8 @@ extern void br_forward(const struct net_bridge_port *to,
 		struct sk_buff *skb);
 extern int br_forward_finish(struct sk_buff *skb);
 extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
-extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+			     struct sk_buff *skb2);
 
 /* br_if.c */
 extern void br_port_carrier_check(struct net_bridge_port *p);
@@ -198,6 +279,94 @@ extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
 				    void __user *arg);
 
+/* br_multicast.c */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+extern int br_multicast_rcv(struct net_bridge *br,
+			    struct net_bridge_port *port,
+			    struct sk_buff *skb);
+extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+					       struct sk_buff *skb);
+extern void br_multicast_add_port(struct net_bridge_port *port);
+extern void br_multicast_del_port(struct net_bridge_port *port);
+extern void br_multicast_enable_port(struct net_bridge_port *port);
+extern void br_multicast_disable_port(struct net_bridge_port *port);
+extern void br_multicast_init(struct net_bridge *br);
+extern void br_multicast_open(struct net_bridge *br);
+extern void br_multicast_stop(struct net_bridge *br);
+extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+				 struct sk_buff *skb);
+extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+				 struct sk_buff *skb, struct sk_buff *skb2);
+extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_port_router(struct net_bridge_port *p,
+					unsigned long val);
+extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_hash_max(struct net_bridge *br,
+				     unsigned long val);
+
+static inline bool br_multicast_is_router(struct net_bridge *br)
+{
+	return br->multicast_router == 2 ||
+	       (br->multicast_router == 1 &&
+		timer_pending(&br->multicast_router_timer));
+}
+#else
+static inline int br_multicast_rcv(struct net_bridge *br,
+				   struct net_bridge_port *port,
+				   struct sk_buff *skb)
+{
+	return 0;
+}
+
+static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+						      struct sk_buff *skb)
+{
+	return NULL;
+}
+
+static inline void br_multicast_add_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_del_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_enable_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_disable_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_init(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_open(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_stop(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+					struct sk_buff *skb)
+{
+}
+
+static inline void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+					struct sk_buff *skb,
+					struct sk_buff *skb2)
+{
+}
+
+static inline bool br_multicast_is_router(struct net_bridge *br)
+{
+	return 0;
+}
+#endif
+
 /* br_netfilter.c */
 #ifdef CONFIG_BRIDGE_NETFILTER
 extern int br_netfilter_init(void);
@@ -254,7 +423,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
-extern struct sysfs_ops brport_sysfs_ops;
+extern const struct sysfs_ops brport_sysfs_ops;
 extern int br_sysfs_addif(struct net_bridge_port *p);
 
 /* br_sysfs_br.c */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fd3f8d6c099..edcf14b560f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -386,6 +386,8 @@ static void br_make_forwarding(struct net_bridge_port *p)
 	else
 		p->state = BR_STATE_LEARNING;
 
+	br_multicast_enable_port(p);
+
 	br_log_state(p);
 
 	if (br->forward_delay != 0)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9a52ac5b452..d527119e9f5 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -108,6 +108,7 @@ void br_stp_disable_port(struct net_bridge_port *p)
 	del_timer(&p->hold_timer);
 
 	br_fdb_delete_by_port(br, p, 0);
+	br_multicast_disable_port(p);
 
 	br_configuration_update(br);
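The #else branch above shows the usual kernel pattern for compiling a feature out: every entry point collapses to an empty static inline, so call sites such as br_stp.c and br_stp_if.c need no #ifdefs of their own and the compiler drops the calls entirely. A compact sketch of the same idea, with illustrative names:

#include <stdio.h>

#define CONFIG_FEATURE_SNOOP 1		/* flip to 0 to compile the feature out */

#if CONFIG_FEATURE_SNOOP
static int feature_rcv(const char *pkt)
{
	printf("snooping %s\n", pkt);	/* real implementation */
	return 0;
}
#else
static inline int feature_rcv(const char *pkt)
{
	return 0;			/* stub: optimized away */
}
#endif

int main(void)
{
	feature_rcv("frame");		/* call site stays branch-free */
	return 0;
}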
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index bee4f300d0c..dd321e39e62 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -345,6 +345,273 @@ static ssize_t store_flush(struct device *d,
 }
 static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static ssize_t show_multicast_router(struct device *d,
+				     struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%d\n", br->multicast_router);
+}
+
+static ssize_t store_multicast_router(struct device *d,
+				      struct device_attribute *attr,
+				      const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_multicast_set_router);
+}
+static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
+		   store_multicast_router);
+
+static ssize_t show_multicast_snooping(struct device *d,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%d\n", !br->multicast_disabled);
+}
+
+static ssize_t store_multicast_snooping(struct device *d,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_multicast_toggle);
+}
+static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
+		   show_multicast_snooping, store_multicast_snooping);
+
+static ssize_t show_hash_elasticity(struct device *d,
+				    struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->hash_elasticity);
+}
+
+static int set_elasticity(struct net_bridge *br, unsigned long val)
+{
+	br->hash_elasticity = val;
+	return 0;
+}
+
+static ssize_t store_hash_elasticity(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_elasticity);
+}
+static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
+		   store_hash_elasticity);
+
+static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
+			     char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->hash_max);
+}
+
+static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
+}
+static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
+		   store_hash_max);
+
+static ssize_t show_multicast_last_member_count(struct device *d,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->multicast_last_member_count);
+}
+
+static int set_last_member_count(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_last_member_count = val;
+	return 0;
+}
+
+static ssize_t store_multicast_last_member_count(struct device *d,
+						 struct device_attribute *attr,
+						 const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_last_member_count);
+}
+static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
+		   show_multicast_last_member_count,
+		   store_multicast_last_member_count);
+
+static ssize_t show_multicast_startup_query_count(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->multicast_startup_query_count);
+}
+
+static int set_startup_query_count(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_startup_query_count = val;
+	return 0;
+}
+
+static ssize_t store_multicast_startup_query_count(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_startup_query_count);
+}
+static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
+		   show_multicast_startup_query_count,
+		   store_multicast_startup_query_count);
+
+static ssize_t show_multicast_last_member_interval(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%lu\n",
+		       jiffies_to_clock_t(br->multicast_last_member_interval));
+}
+
+static int set_last_member_interval(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_last_member_interval = clock_t_to_jiffies(val);
+	return 0;
+}
+
+static ssize_t store_multicast_last_member_interval(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_last_member_interval);
+}
+static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
+		   show_multicast_last_member_interval,
+		   store_multicast_last_member_interval);
+
+static ssize_t show_multicast_membership_interval(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%lu\n",
+		       jiffies_to_clock_t(br->multicast_membership_interval));
+}
+
+static int set_membership_interval(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_membership_interval = clock_t_to_jiffies(val);
+	return 0;
+}
+
+static ssize_t store_multicast_membership_interval(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_membership_interval);
+}
+static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
+		   show_multicast_membership_interval,
+		   store_multicast_membership_interval);
+
+static ssize_t show_multicast_querier_interval(struct device *d,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%lu\n",
+		       jiffies_to_clock_t(br->multicast_querier_interval));
+}
+
+static int set_querier_interval(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_querier_interval = clock_t_to_jiffies(val);
+	return 0;
+}
+
+static ssize_t store_multicast_querier_interval(struct device *d,
+						struct device_attribute *attr,
+						const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_querier_interval);
+}
+static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
+		   show_multicast_querier_interval,
+		   store_multicast_querier_interval);
+
+static ssize_t show_multicast_query_interval(struct device *d,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%lu\n",
+		       jiffies_to_clock_t(br->multicast_query_interval));
+}
+
+static int set_query_interval(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_query_interval = clock_t_to_jiffies(val);
+	return 0;
+}
+
+static ssize_t store_multicast_query_interval(struct device *d,
+					      struct device_attribute *attr,
+					      const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_query_interval);
+}
+static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
+		   show_multicast_query_interval,
+		   store_multicast_query_interval);
+
+static ssize_t show_multicast_query_response_interval(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(
+		buf, "%lu\n",
+		jiffies_to_clock_t(br->multicast_query_response_interval));
+}
+
+static int set_query_response_interval(struct net_bridge *br,
+				       unsigned long val)
+{
+	br->multicast_query_response_interval = clock_t_to_jiffies(val);
+	return 0;
+}
+
+static ssize_t store_multicast_query_response_interval(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_query_response_interval);
+}
+static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
+		   show_multicast_query_response_interval,
+		   store_multicast_query_response_interval);
+
+static ssize_t show_multicast_startup_query_interval(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(
+		buf, "%lu\n",
+		jiffies_to_clock_t(br->multicast_startup_query_interval));
+}
+
+static int set_startup_query_interval(struct net_bridge *br,
+				      unsigned long val)
+{
+	br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+	return 0;
+}
+
+static ssize_t store_multicast_startup_query_interval(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_startup_query_interval);
+}
+static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
+		   show_multicast_startup_query_interval,
+		   store_multicast_startup_query_interval);
+#endif
+
 static struct attribute *bridge_attrs[] = {
 	&dev_attr_forward_delay.attr,
 	&dev_attr_hello_time.attr,
@@ -364,6 +631,20 @@ static struct attribute *bridge_attrs[] = {
 	&dev_attr_gc_timer.attr,
 	&dev_attr_group_addr.attr,
 	&dev_attr_flush.attr,
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	&dev_attr_multicast_router.attr,
+	&dev_attr_multicast_snooping.attr,
+	&dev_attr_hash_elasticity.attr,
+	&dev_attr_hash_max.attr,
+	&dev_attr_multicast_last_member_count.attr,
+	&dev_attr_multicast_startup_query_count.attr,
+	&dev_attr_multicast_last_member_interval.attr,
+	&dev_attr_multicast_membership_interval.attr,
+	&dev_attr_multicast_querier_interval.attr,
+	&dev_attr_multicast_query_interval.attr,
+	&dev_attr_multicast_query_response_interval.attr,
+	&dev_attr_multicast_startup_query_interval.attr,
+#endif
 	NULL
 };
 
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 820643a3ba9..0b9916489d6 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -159,6 +159,21 @@ static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
 static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
 		   show_hairpin_mode, store_hairpin_mode);
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
+{
+	return sprintf(buf, "%d\n", p->multicast_router);
+}
+
+static ssize_t store_multicast_router(struct net_bridge_port *p,
+				      unsigned long v)
+{
+	return br_multicast_set_port_router(p, v);
+}
+static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
+		   store_multicast_router);
+#endif
+
 static struct brport_attribute *brport_attrs[] = {
 	&brport_attr_path_cost,
 	&brport_attr_priority,
@@ -176,6 +191,9 @@ static struct brport_attribute *brport_attrs[] = {
 	&brport_attr_hold_timer,
 	&brport_attr_flush,
 	&brport_attr_hairpin_mode,
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	&brport_attr_multicast_router,
+#endif
 	NULL
 };
 
@@ -220,7 +238,7 @@ static ssize_t brport_store(struct kobject * kobj,
 	return ret;
 }
 
-struct sysfs_ops brport_sysfs_ops = {
+const struct sysfs_ops brport_sysfs_ops = {
 	.show = brport_show,
 	.store = brport_store,
 };
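The attributes registered above surface all of the snooping knobs under the bridge's sysfs directory. A minimal user-space sketch of driving them, assuming a bridge named br0 and the attribute names added in br_sysfs_br.c; paths will differ on other setups:

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* enable snooping; mark the bridge itself as a permanent
	 * multicast router (2 = always, 1 = learned, 0 = never) */
	write_attr("/sys/class/net/br0/bridge/multicast_snooping", "1");
	write_attr("/sys/class/net/br0/bridge/multicast_router", "2");
	return 0;
}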
--git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c index bd91dc58d49..5d1176758ca 100644 --- a/net/bridge/netfilter/ebt_802_3.c +++ b/net/bridge/netfilter/ebt_802_3.c @@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_802_3_mt, .checkentry = ebt_802_3_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)), + .matchsize = sizeof(struct ebt_802_3_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c index b7ad60419f9..e727697c584 100644 --- a/net/bridge/netfilter/ebt_arp.c +++ b/net/bridge/netfilter/ebt_arp.c @@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_arp_mt, .checkentry = ebt_arp_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)), + .matchsize = sizeof(struct ebt_arp_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c index 76584cd72e5..f392e9d93f5 100644 --- a/net/bridge/netfilter/ebt_arpreply.c +++ b/net/bridge/netfilter/ebt_arpreply.c @@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = { .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING), .target = ebt_arpreply_tg, .checkentry = ebt_arpreply_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)), + .targetsize = sizeof(struct ebt_arpreply_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c index 6b49ea9e31f..2bb40d728a3 100644 --- a/net/bridge/netfilter/ebt_dnat.c +++ b/net/bridge/netfilter/ebt_dnat.c @@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = { (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), .target = ebt_dnat_tg, .checkentry = ebt_dnat_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), + .targetsize = sizeof(struct ebt_nat_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c index d771bbfbcbe..5de6df6f86b 100644 --- a/net/bridge/netfilter/ebt_ip.c +++ b/net/bridge/netfilter/ebt_ip.c @@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_ip_mt, .checkentry = ebt_ip_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)), + .matchsize = sizeof(struct ebt_ip_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c index 784a6573876..bbf2534ef02 100644 --- a/net/bridge/netfilter/ebt_ip6.c +++ b/net/bridge/netfilter/ebt_ip6.c @@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_ip6_mt, .checkentry = ebt_ip6_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)), + .matchsize = sizeof(struct ebt_ip6_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index f7bd9192ff0..7a8182710eb 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c @@ -84,13 +84,29 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par) return true; } + +#ifdef CONFIG_COMPAT +/* + * no conversion function needed -- + * only avg/burst have meaningful values in userspace. 
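A standalone userspace sketch (not part of the patch) of why a bare .compatsize with no conversion callbacks is enough for ebt_limit: it mimics the native layout and the ebt_compat_limit_info declared just below, assuming an LP64 kernel with ILP32 userland. Only prev changes width, and since checkentry recomputes prev/credit/credit_cap/cost, the avg/burst pair, which sits at identical offsets in both layouts, is all that must survive the copy:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t compat_uint_t;
	typedef uint32_t compat_ulong_t;

	struct ebt_limit_info {			/* native LP64 layout */
		uint32_t avg, burst;
		unsigned long prev;		/* 8 bytes, 8-byte aligned */
		uint32_t credit, credit_cap, cost;
	};

	struct ebt_compat_limit_info {		/* ILP32 userspace layout */
		compat_uint_t avg, burst;
		compat_ulong_t prev;		/* 4 bytes */
		compat_uint_t credit, credit_cap, cost;
	};

	int main(void)
	{
		printf("native %zu bytes, compat %zu bytes\n",
		       sizeof(struct ebt_limit_info),
		       sizeof(struct ebt_compat_limit_info));
		/* avg/burst line up in both layouts; everything after
		 * prev is recomputed by the kernel at check time */
		printf("avg/burst offsets: %zu/%zu vs %zu/%zu\n",
		       offsetof(struct ebt_limit_info, avg),
		       offsetof(struct ebt_limit_info, burst),
		       offsetof(struct ebt_compat_limit_info, avg),
		       offsetof(struct ebt_compat_limit_info, burst));
		return 0;
	}
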
+ */ +struct ebt_compat_limit_info { + compat_uint_t avg, burst; + compat_ulong_t prev; + compat_uint_t credit, credit_cap, cost; +}; +#endif + static struct xt_match ebt_limit_mt_reg __read_mostly = { .name = "limit", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_limit_mt, .checkentry = ebt_limit_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)), + .matchsize = sizeof(struct ebt_limit_info), +#ifdef CONFIG_COMPAT + .compatsize = sizeof(struct ebt_compat_limit_info), +#endif .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index e4ea3fdd1d4..e873924ddb5 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c @@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_log_tg, .checkentry = ebt_log_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)), + .targetsize = sizeof(struct ebt_log_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c index 2fee7e8e2e9..2b5ce533d6b 100644 --- a/net/bridge/netfilter/ebt_mark.c +++ b/net/bridge/netfilter/ebt_mark.c @@ -52,6 +52,32 @@ static bool ebt_mark_tg_check(const struct xt_tgchk_param *par) return false; return true; } +#ifdef CONFIG_COMPAT +struct compat_ebt_mark_t_info { + compat_ulong_t mark; + compat_uint_t target; +}; + +static void mark_tg_compat_from_user(void *dst, const void *src) +{ + const struct compat_ebt_mark_t_info *user = src; + struct ebt_mark_t_info *kern = dst; + + kern->mark = user->mark; + kern->target = user->target; +} + +static int mark_tg_compat_to_user(void __user *dst, const void *src) +{ + struct compat_ebt_mark_t_info __user *user = dst; + const struct ebt_mark_t_info *kern = src; + + if (put_user(kern->mark, &user->mark) || + put_user(kern->target, &user->target)) + return -EFAULT; + return 0; +} +#endif static struct xt_target ebt_mark_tg_reg __read_mostly = { .name = "mark", @@ -59,7 +85,12 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_mark_tg, .checkentry = ebt_mark_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)), + .targetsize = sizeof(struct ebt_mark_t_info), +#ifdef CONFIG_COMPAT + .compatsize = sizeof(struct compat_ebt_mark_t_info), + .compat_from_user = mark_tg_compat_from_user, + .compat_to_user = mark_tg_compat_to_user, +#endif .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c index ea570f214b1..8de8c396d91 100644 --- a/net/bridge/netfilter/ebt_mark_m.c +++ b/net/bridge/netfilter/ebt_mark_m.c @@ -35,13 +35,50 @@ static bool ebt_mark_mt_check(const struct xt_mtchk_param *par) return true; } + +#ifdef CONFIG_COMPAT +struct compat_ebt_mark_m_info { + compat_ulong_t mark, mask; + uint8_t invert, bitmask; +}; + +static void mark_mt_compat_from_user(void *dst, const void *src) +{ + const struct compat_ebt_mark_m_info *user = src; + struct ebt_mark_m_info *kern = dst; + + kern->mark = user->mark; + kern->mask = user->mask; + kern->invert = user->invert; + kern->bitmask = user->bitmask; +} + +static int mark_mt_compat_to_user(void __user *dst, const void *src) +{ + struct compat_ebt_mark_m_info __user *user = dst; + const struct ebt_mark_m_info *kern = src; + + if (put_user(kern->mark, &user->mark) || + put_user(kern->mask, &user->mask) || + put_user(kern->invert, &user->invert) || + put_user(kern->bitmask, &user->bitmask)) + return -EFAULT; + return 0; +} +#endif + static 
struct xt_match ebt_mark_mt_reg __read_mostly = { .name = "mark_m", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_mark_mt, .checkentry = ebt_mark_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)), + .matchsize = sizeof(struct ebt_mark_m_info), +#ifdef CONFIG_COMPAT + .compatsize = sizeof(struct compat_ebt_mark_m_info), + .compat_from_user = mark_mt_compat_from_user, + .compat_to_user = mark_mt_compat_to_user, +#endif .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c index 2a63d996dd4..40dbd248b9a 100644 --- a/net/bridge/netfilter/ebt_nflog.c +++ b/net/bridge/netfilter/ebt_nflog.c @@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_nflog_tg, .checkentry = ebt_nflog_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)), + .targetsize = sizeof(struct ebt_nflog_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c index 883e96e2a54..e2a07e6cbef 100644 --- a/net/bridge/netfilter/ebt_pkttype.c +++ b/net/bridge/netfilter/ebt_pkttype.c @@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_pkttype_mt, .checkentry = ebt_pkttype_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)), + .matchsize = sizeof(struct ebt_pkttype_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c index c8a49f7a57b..9be8fbcd370 100644 --- a/net/bridge/netfilter/ebt_redirect.c +++ b/net/bridge/netfilter/ebt_redirect.c @@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = { (1 << NF_BR_BROUTING), .target = ebt_redirect_tg, .checkentry = ebt_redirect_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)), + .targetsize = sizeof(struct ebt_redirect_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c index 8d04d4c302b..9c7b520765a 100644 --- a/net/bridge/netfilter/ebt_snat.c +++ b/net/bridge/netfilter/ebt_snat.c @@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = { .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING), .target = ebt_snat_tg, .checkentry = ebt_snat_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), + .targetsize = sizeof(struct ebt_nat_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 75e29a9cebd..92a93d36376 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c @@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_stp_mt, .checkentry = ebt_stp_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)), + .matchsize = sizeof(struct ebt_stp_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index ce50688a643..c6ac657074a 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -275,7 +275,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_ulog_tg, .checkentry = ebt_ulog_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)), + .targetsize = sizeof(struct ebt_ulog_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c index 3dddd489328..be1dd2e1f61 100644 --- 
a/net/bridge/netfilter/ebt_vlan.c +++ b/net/bridge/netfilter/ebt_vlan.c @@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_vlan_mt, .checkentry = ebt_vlan_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)), + .matchsize = sizeof(struct ebt_vlan_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index d32ab13e728..ae3f106c390 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net) static void __net_exit broute_net_exit(struct net *net) { - ebt_unregister_table(net->xt.broute_table); + ebt_unregister_table(net, net->xt.broute_table); } static struct pernet_operations broute_net_ops = { diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 60b1a6ca718..42e6bd09457 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net) static void __net_exit frame_filter_net_exit(struct net *net) { - ebt_unregister_table(net->xt.frame_filter); + ebt_unregister_table(net, net->xt.frame_filter); } static struct pernet_operations frame_filter_net_ops = { diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 4a98804203b..6dc2f878ae0 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net) static void __net_exit frame_nat_net_exit(struct net *net) { - ebt_unregister_table(net->xt.frame_nat); + ebt_unregister_table(net, net->xt.frame_nat); } static struct pernet_operations frame_nat_net_ops = { diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 0b7f262cd14..dfb58056a89 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -33,11 +33,6 @@ #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ "report to author: "format, ## args) /* #define BUGPRINT(format, args...) */ -#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\ - ": out of memory: "format, ## args) -/* #define MEMPRINT(format, args...) */ - - /* * Each cpu has its own set of counters, so there is no need for write_lock in @@ -56,11 +51,37 @@ static DEFINE_MUTEX(ebt_mutex); +#ifdef CONFIG_COMPAT +static void ebt_standard_compat_from_user(void *dst, const void *src) +{ + int v = *(compat_int_t *)src; + + if (v >= 0) + v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); + memcpy(dst, &v, sizeof(v)); +} + +static int ebt_standard_compat_to_user(void __user *dst, const void *src) +{ + compat_int_t cv = *(int *)src; + + if (cv >= 0) + cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); + return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; +} +#endif + + static struct xt_target ebt_standard_target = { .name = "standard", .revision = 0, .family = NFPROTO_BRIDGE, .targetsize = sizeof(int), +#ifdef CONFIG_COMPAT + .compatsize = sizeof(compat_int_t), + .compat_from_user = ebt_standard_compat_from_user, + .compat_to_user = ebt_standard_compat_to_user, +#endif }; static inline int @@ -82,7 +103,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m, return m->u.match->match(skb, par) ? 
EBT_MATCH : EBT_NOMATCH; } -static inline int ebt_dev_check(char *entry, const struct net_device *device) +static inline int +ebt_dev_check(const char *entry, const struct net_device *device) { int i = 0; const char *devname; @@ -100,8 +122,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device) #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) /* process standard matches */ -static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h, - const struct net_device *in, const struct net_device *out) +static inline int +ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h, + const struct net_device *in, const struct net_device *out) { int verdict, i; @@ -156,12 +179,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, int i, nentries; struct ebt_entry *point; struct ebt_counter *counter_base, *cb_base; - struct ebt_entry_target *t; + const struct ebt_entry_target *t; int verdict, sp = 0; struct ebt_chainstack *cs; struct ebt_entries *chaininfo; - char *base; - struct ebt_table_info *private; + const char *base; + const struct ebt_table_info *private; bool hotdrop = false; struct xt_match_param mtpar; struct xt_target_param tgpar; @@ -395,7 +418,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, return 0; } -static int ebt_verify_pointers(struct ebt_replace *repl, +static int ebt_verify_pointers(const struct ebt_replace *repl, struct ebt_table_info *newinfo) { unsigned int limit = repl->entries_size; @@ -442,6 +465,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl, break; if (left < e->next_offset) break; + if (e->next_offset < sizeof(struct ebt_entry)) + return -EINVAL; offset += e->next_offset; } } @@ -466,8 +491,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl, * to parse the userspace data */ static inline int -ebt_check_entry_size_and_hooks(struct ebt_entry *e, - struct ebt_table_info *newinfo, +ebt_check_entry_size_and_hooks(const struct ebt_entry *e, + const struct ebt_table_info *newinfo, unsigned int *n, unsigned int *cnt, unsigned int *totalcnt, unsigned int *udc_cnt) { @@ -561,13 +586,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, } static inline int -ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) +ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; + par.net = net; par.match = m->u.match; par.matchinfo = m->data; par.family = NFPROTO_BRIDGE; @@ -578,13 +604,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) } static inline int -ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) +ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; if (i && (*i)-- == 0) return 1; + par.net = net; par.target = w->u.watcher; par.targinfo = w->data; par.family = NFPROTO_BRIDGE; @@ -595,7 +622,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) } static inline int -ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) +ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) { struct xt_tgdtor_param par; struct ebt_entry_target *t; @@ -605,10 +632,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) /* we're done */ if (cnt && (*cnt)-- == 0) return 1; - EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL); - EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL); + EBT_WATCHER_ITERATE(e, 
ebt_cleanup_watcher, net, NULL); + EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); + par.net = net; par.target = t->u.target; par.targinfo = t->data; par.family = NFPROTO_BRIDGE; @@ -619,7 +647,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) } static inline int -ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, +ebt_check_entry(struct ebt_entry *e, struct net *net, + const struct ebt_table_info *newinfo, const char *name, unsigned int *cnt, struct ebt_cl_stack *cl_s, unsigned int udc_cnt) { @@ -671,6 +700,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, } i = 0; + mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; mtpar.hook_mask = tgpar.hook_mask = hookmask; @@ -726,9 +756,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, (*cnt)++; return 0; cleanup_watchers: - EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j); + EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); cleanup_matches: - EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i); + EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); return ret; } @@ -737,12 +767,12 @@ cleanup_matches: * the hook mask for udc tells us from which base chains the udc can be * accessed. This mask is a parameter to the check() functions of the extensions */ -static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s, +static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, unsigned int udc_cnt, unsigned int hooknr, char *base) { int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; - struct ebt_entry *e = (struct ebt_entry *)chain->data; - struct ebt_entry_target *t; + const struct ebt_entry *e = (struct ebt_entry *)chain->data; + const struct ebt_entry_target *t; while (pos < nentries || chain_nr != -1) { /* end of udc, go back one 'recursion' step */ @@ -808,7 +838,8 @@ letscontinue: } /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ -static int translate_table(char *name, struct ebt_table_info *newinfo) +static int translate_table(struct net *net, const char *name, + struct ebt_table_info *newinfo) { unsigned int i, j, k, udc_cnt; int ret; @@ -917,17 +948,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo) /* used to know what we need to clean up if something goes wrong */ i = 0; ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, - ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); + ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); if (ret != 0) { EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, - ebt_cleanup_entry, &i); + ebt_cleanup_entry, net, &i); } vfree(cl_s); return ret; } /* called under write_lock */ -static void get_counters(struct ebt_counter *oldcounters, +static void get_counters(const struct ebt_counter *oldcounters, struct ebt_counter *counters, unsigned int nentries) { int i, cpu; @@ -949,90 +980,45 @@ static void get_counters(struct ebt_counter *oldcounters, } } -/* replace the table */ -static int do_replace(struct net *net, void __user *user, unsigned int len) +static int do_replace_finish(struct net *net, struct ebt_replace *repl, + struct ebt_table_info *newinfo) { - int ret, i, countersize; - struct ebt_table_info *newinfo; - struct ebt_replace tmp; - struct ebt_table *t; + int ret, i; struct ebt_counter *counterstmp = NULL; /* used to be able to unlock earlier */ struct ebt_table_info 
*table; - - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) - return -EFAULT; - - if (len != sizeof(tmp) + tmp.entries_size) { - BUGPRINT("Wrong len argument\n"); - return -EINVAL; - } - - if (tmp.entries_size == 0) { - BUGPRINT("Entries_size never zero\n"); - return -EINVAL; - } - /* overflow check */ - if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) - return -ENOMEM; - if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) - return -ENOMEM; - - countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; - newinfo = vmalloc(sizeof(*newinfo) + countersize); - if (!newinfo) - return -ENOMEM; - - if (countersize) - memset(newinfo->counters, 0, countersize); - - newinfo->entries = vmalloc(tmp.entries_size); - if (!newinfo->entries) { - ret = -ENOMEM; - goto free_newinfo; - } - if (copy_from_user( - newinfo->entries, tmp.entries, tmp.entries_size) != 0) { - BUGPRINT("Couldn't copy entries from userspace\n"); - ret = -EFAULT; - goto free_entries; - } + struct ebt_table *t; /* the user wants counters back the check on the size is done later, when we have the lock */ - if (tmp.num_counters) { - counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp)); - if (!counterstmp) { - ret = -ENOMEM; - goto free_entries; - } + if (repl->num_counters) { + unsigned long size = repl->num_counters * sizeof(*counterstmp); + counterstmp = vmalloc(size); + if (!counterstmp) + return -ENOMEM; } - else - counterstmp = NULL; - /* this can get initialized by translate_table() */ newinfo->chainstack = NULL; - ret = ebt_verify_pointers(&tmp, newinfo); + ret = ebt_verify_pointers(repl, newinfo); if (ret != 0) goto free_counterstmp; - ret = translate_table(tmp.name, newinfo); + ret = translate_table(net, repl->name, newinfo); if (ret != 0) goto free_counterstmp; - t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); + t = find_table_lock(net, repl->name, &ret, &ebt_mutex); if (!t) { ret = -ENOENT; goto free_iterate; } /* the table doesn't like it */ - if (t->check && (ret = t->check(newinfo, tmp.valid_hooks))) + if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) goto free_unlock; - if (tmp.num_counters && tmp.num_counters != t->private->nentries) { + if (repl->num_counters && repl->num_counters != t->private->nentries) { BUGPRINT("Wrong nr. of counters requested\n"); ret = -EINVAL; goto free_unlock; @@ -1048,7 +1034,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len) module_put(t->me); /* we need an atomic snapshot of the counters */ write_lock_bh(&t->lock); - if (tmp.num_counters) + if (repl->num_counters) get_counters(t->private->counters, counterstmp, t->private->nentries); @@ -1059,10 +1045,9 @@ static int do_replace(struct net *net, void __user *user, unsigned int len) allocation. Only reason why this is done is because this way the lock is held only once, while this doesn't bring the kernel into a dangerous state. 
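The sentence above captures why the scratch buffer exists at all: copy_to_user() can fault and sleep, which is not allowed while holding the BH write lock, so the counters are summed into temporary memory under the lock and handed to userspace only afterwards. The patch factors this into copy_counters_to_user() further down; a condensed restatement of the pattern, with a hypothetical helper name and the table-lookup details trimmed:

	static int snapshot_counters_to_user(struct ebt_table *t,
					     struct ebt_counter __user *dst,
					     unsigned int nentries)
	{
		struct ebt_counter *snap = vmalloc(nentries * sizeof(*snap));
		int ret = 0;

		if (!snap)
			return -ENOMEM;

		/* atomic view: sum the per-cpu counters under the lock */
		write_lock_bh(&t->lock);
		get_counters(t->private->counters, snap, nentries);
		write_unlock_bh(&t->lock);

		/* touch userspace only after the BH write lock is dropped */
		if (copy_to_user(dst, snap, nentries * sizeof(*snap)))
			ret = -EFAULT;
		vfree(snap);
		return ret;
	}
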
*/ - if (tmp.num_counters && - copy_to_user(tmp.counters, counterstmp, - tmp.num_counters * sizeof(struct ebt_counter))) { - BUGPRINT("Couldn't copy counters to userspace\n"); + if (repl->num_counters && + copy_to_user(repl->counters, counterstmp, + repl->num_counters * sizeof(struct ebt_counter))) { ret = -EFAULT; } else @@ -1070,7 +1055,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len) /* decrease module count and free resources */ EBT_ENTRY_ITERATE(table->entries, table->entries_size, - ebt_cleanup_entry, NULL); + ebt_cleanup_entry, net, NULL); vfree(table->entries); if (table->chainstack) { @@ -1087,7 +1072,7 @@ free_unlock: mutex_unlock(&ebt_mutex); free_iterate: EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, - ebt_cleanup_entry, NULL); + ebt_cleanup_entry, net, NULL); free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ @@ -1096,6 +1081,59 @@ free_counterstmp: vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } + return ret; +} + +/* replace the table */ +static int do_replace(struct net *net, const void __user *user, + unsigned int len) +{ + int ret, countersize; + struct ebt_table_info *newinfo; + struct ebt_replace tmp; + + if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + return -EFAULT; + + if (len != sizeof(tmp) + tmp.entries_size) { + BUGPRINT("Wrong len argument\n"); + return -EINVAL; + } + + if (tmp.entries_size == 0) { + BUGPRINT("Entries_size never zero\n"); + return -EINVAL; + } + /* overflow check */ + if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / + NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) + return -ENOMEM; + if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) + return -ENOMEM; + + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; + newinfo = vmalloc(sizeof(*newinfo) + countersize); + if (!newinfo) + return -ENOMEM; + + if (countersize) + memset(newinfo->counters, 0, countersize); + + newinfo->entries = vmalloc(tmp.entries_size); + if (!newinfo->entries) { + ret = -ENOMEM; + goto free_newinfo; + } + if (copy_from_user( + newinfo->entries, tmp.entries, tmp.entries_size) != 0) { + BUGPRINT("Couldn't copy entries from userspace\n"); + ret = -EFAULT; + goto free_entries; + } + + ret = do_replace_finish(net, &tmp, newinfo); + if (ret == 0) + return ret; free_entries: vfree(newinfo->entries); free_newinfo: @@ -1154,7 +1192,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table) newinfo->hook_entry[i] = p + ((char *)repl->hook_entry[i] - repl->entries); } - ret = translate_table(repl->name, newinfo); + ret = translate_table(net, repl->name, newinfo); if (ret != 0) { BUGPRINT("Translate_table failed\n"); goto free_chainstack; @@ -1204,7 +1242,7 @@ out: return ERR_PTR(ret); } -void ebt_unregister_table(struct ebt_table *table) +void ebt_unregister_table(struct net *net, struct ebt_table *table) { int i; @@ -1216,7 +1254,7 @@ void ebt_unregister_table(struct ebt_table *table) list_del(&table->list); mutex_unlock(&ebt_mutex); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, - ebt_cleanup_entry, NULL); + ebt_cleanup_entry, net, NULL); if (table->private->nentries) module_put(table->me); vfree(table->private->entries); @@ -1230,39 +1268,33 @@ void ebt_unregister_table(struct ebt_table *table) } /* userspace just supplied us with counters */ -static int update_counters(struct net *net, void __user *user, unsigned int len) +static int do_update_counters(struct net *net, const char *name, + struct 
ebt_counter __user *counters, + unsigned int num_counters, + const void __user *user, unsigned int len) { int i, ret; struct ebt_counter *tmp; - struct ebt_replace hlp; struct ebt_table *t; - if (copy_from_user(&hlp, user, sizeof(hlp))) - return -EFAULT; - - if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) - return -EINVAL; - if (hlp.num_counters == 0) + if (num_counters == 0) return -EINVAL; - if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) { - MEMPRINT("Update_counters && nomemory\n"); + tmp = vmalloc(num_counters * sizeof(*tmp)); + if (!tmp) return -ENOMEM; - } - t = find_table_lock(net, hlp.name, &ret, &ebt_mutex); + t = find_table_lock(net, name, &ret, &ebt_mutex); if (!t) goto free_tmp; - if (hlp.num_counters != t->private->nentries) { + if (num_counters != t->private->nentries) { BUGPRINT("Wrong nr of counters\n"); ret = -EINVAL; goto unlock_mutex; } - if ( copy_from_user(tmp, hlp.counters, - hlp.num_counters * sizeof(struct ebt_counter)) ) { - BUGPRINT("Updata_counters && !cfu\n"); + if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) { ret = -EFAULT; goto unlock_mutex; } @@ -1271,7 +1303,7 @@ static int update_counters(struct net *net, void __user *user, unsigned int len) write_lock_bh(&t->lock); /* we add to the counters of the first cpu */ - for (i = 0; i < hlp.num_counters; i++) { + for (i = 0; i < num_counters; i++) { t->private->counters[i].pcnt += tmp[i].pcnt; t->private->counters[i].bcnt += tmp[i].bcnt; } @@ -1285,8 +1317,23 @@ free_tmp: return ret; } -static inline int ebt_make_matchname(struct ebt_entry_match *m, - char *base, char __user *ubase) +static int update_counters(struct net *net, const void __user *user, + unsigned int len) +{ + struct ebt_replace hlp; + + if (copy_from_user(&hlp, user, sizeof(hlp))) + return -EFAULT; + + if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) + return -EINVAL; + + return do_update_counters(net, hlp.name, hlp.counters, + hlp.num_counters, user, len); +} + +static inline int ebt_make_matchname(const struct ebt_entry_match *m, + const char *base, char __user *ubase) { char __user *hlp = ubase + ((char *)m - base); if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) @@ -1294,8 +1341,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m, return 0; } -static inline int ebt_make_watchername(struct ebt_entry_watcher *w, - char *base, char __user *ubase) +static inline int ebt_make_watchername(const struct ebt_entry_watcher *w, + const char *base, char __user *ubase) { char __user *hlp = ubase + ((char *)w - base); if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) @@ -1303,11 +1350,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w, return 0; } -static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) +static inline int +ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase) { int ret; char __user *hlp; - struct ebt_entry_target *t; + const struct ebt_entry_target *t; if (e->bitmask == 0) return 0; @@ -1326,13 +1374,46 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u return 0; } +static int copy_counters_to_user(struct ebt_table *t, + const struct ebt_counter *oldcounters, + void __user *user, unsigned int num_counters, + unsigned int nentries) +{ + struct ebt_counter *counterstmp; + int ret = 0; + + /* userspace might not need the counters */ + if (num_counters == 0) + return 0; + + if (num_counters != nentries) { + 
BUGPRINT("Num_counters wrong\n"); + return -EINVAL; + } + + counterstmp = vmalloc(nentries * sizeof(*counterstmp)); + if (!counterstmp) + return -ENOMEM; + + write_lock_bh(&t->lock); + get_counters(oldcounters, counterstmp, nentries); + write_unlock_bh(&t->lock); + + if (copy_to_user(user, counterstmp, + nentries * sizeof(struct ebt_counter))) + ret = -EFAULT; + vfree(counterstmp); + return ret; +} + /* called with ebt_mutex locked */ static int copy_everything_to_user(struct ebt_table *t, void __user *user, - int *len, int cmd) + const int *len, int cmd) { struct ebt_replace tmp; - struct ebt_counter *counterstmp, *oldcounters; + const struct ebt_counter *oldcounters; unsigned int entries_size, nentries; + int ret; char *entries; if (cmd == EBT_SO_GET_ENTRIES) { @@ -1347,16 +1428,12 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user, oldcounters = t->table->counters; } - if (copy_from_user(&tmp, user, sizeof(tmp))) { - BUGPRINT("Cfu didn't work\n"); + if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; - } if (*len != sizeof(struct ebt_replace) + entries_size + - (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) { - BUGPRINT("Wrong size\n"); + (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) return -EINVAL; - } if (tmp.nentries != nentries) { BUGPRINT("Nentries wrong\n"); @@ -1368,29 +1445,10 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user, return -EINVAL; } - /* userspace might not need the counters */ - if (tmp.num_counters) { - if (tmp.num_counters != nentries) { - BUGPRINT("Num_counters wrong\n"); - return -EINVAL; - } - counterstmp = vmalloc(nentries * sizeof(*counterstmp)); - if (!counterstmp) { - MEMPRINT("Couldn't copy counters, out of memory\n"); - return -ENOMEM; - } - write_lock_bh(&t->lock); - get_counters(oldcounters, counterstmp, nentries); - write_unlock_bh(&t->lock); - - if (copy_to_user(tmp.counters, counterstmp, - nentries * sizeof(struct ebt_counter))) { - BUGPRINT("Couldn't copy counters to userspace\n"); - vfree(counterstmp); - return -EFAULT; - } - vfree(counterstmp); - } + ret = copy_counters_to_user(t, oldcounters, tmp.counters, + tmp.num_counters, nentries); + if (ret) + return ret; if (copy_to_user(tmp.entries, entries, entries_size)) { BUGPRINT("Couldn't copy entries to userspace\n"); @@ -1418,7 +1476,7 @@ static int do_ebt_set_ctl(struct sock *sk, break; default: ret = -EINVAL; - } + } return ret; } @@ -1478,15 +1536,892 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) return ret; } +#ifdef CONFIG_COMPAT +/* 32 bit-userspace compatibility definitions. */ +struct compat_ebt_replace { + char name[EBT_TABLE_MAXNAMELEN]; + compat_uint_t valid_hooks; + compat_uint_t nentries; + compat_uint_t entries_size; + /* start of the chains */ + compat_uptr_t hook_entry[NF_BR_NUMHOOKS]; + /* nr of counters userspace expects back */ + compat_uint_t num_counters; + /* where the kernel will put the old counters. 
*/ + compat_uptr_t counters; + compat_uptr_t entries; +}; + +/* struct ebt_entry_match, _target and _watcher have same layout */ +struct compat_ebt_entry_mwt { + union { + char name[EBT_FUNCTION_MAXNAMELEN]; + compat_uptr_t ptr; + } u; + compat_uint_t match_size; + compat_uint_t data[0]; +}; + +/* account for possible padding between match_size and ->data */ +static int ebt_compat_entry_padsize(void) +{ + BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < + COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); + return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - + COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); +} + +static int ebt_compat_match_offset(const struct xt_match *match, + unsigned int userlen) +{ + /* + * ebt_among needs special handling. The kernel .matchsize is + * set to -1 at registration time; at runtime an EBT_ALIGN()ed + * value is expected. + * Example: userspace sends 4500, ebt_among.c wants 4504. + */ + if (unlikely(match->matchsize == -1)) + return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen); + return xt_compat_match_offset(match); +} + +static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, + unsigned int *size) +{ + const struct xt_match *match = m->u.match; + struct compat_ebt_entry_mwt __user *cm = *dstptr; + int off = ebt_compat_match_offset(match, m->match_size); + compat_uint_t msize = m->match_size - off; + + BUG_ON(off >= m->match_size); + + if (copy_to_user(cm->u.name, match->name, + strlen(match->name) + 1) || put_user(msize, &cm->match_size)) + return -EFAULT; + + if (match->compat_to_user) { + if (match->compat_to_user(cm->data, m->data)) + return -EFAULT; + } else if (copy_to_user(cm->data, m->data, msize)) + return -EFAULT; + + *size -= ebt_compat_entry_padsize() + off; + *dstptr = cm->data; + *dstptr += msize; + return 0; +} + +static int compat_target_to_user(struct ebt_entry_target *t, + void __user **dstptr, + unsigned int *size) +{ + const struct xt_target *target = t->u.target; + struct compat_ebt_entry_mwt __user *cm = *dstptr; + int off = xt_compat_target_offset(target); + compat_uint_t tsize = t->target_size - off; + + BUG_ON(off >= t->target_size); + + if (copy_to_user(cm->u.name, target->name, + strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) + return -EFAULT; + + if (target->compat_to_user) { + if (target->compat_to_user(cm->data, t->data)) + return -EFAULT; + } else if (copy_to_user(cm->data, t->data, tsize)) + return -EFAULT; + + *size -= ebt_compat_entry_padsize() + off; + *dstptr = cm->data; + *dstptr += tsize; + return 0; +} + +static int compat_watcher_to_user(struct ebt_entry_watcher *w, + void __user **dstptr, + unsigned int *size) +{ + return compat_target_to_user((struct ebt_entry_target *)w, + dstptr, size); +} + +static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, + unsigned int *size) +{ + struct ebt_entry_target *t; + struct ebt_entry __user *ce; + u32 watchers_offset, target_offset, next_offset; + compat_uint_t origsize; + int ret; + + if (e->bitmask == 0) { + if (*size < sizeof(struct ebt_entries)) + return -EINVAL; + if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries))) + return -EFAULT; + + *dstptr += sizeof(struct ebt_entries); + *size -= sizeof(struct ebt_entries); + return 0; + } + + if (*size < sizeof(*ce)) + return -EINVAL; + + ce = (struct ebt_entry __user *)*dstptr; + if (copy_to_user(ce, e, sizeof(*ce))) + return -EFAULT; + + origsize = *size; + *dstptr += sizeof(*ce); + + ret = EBT_MATCH_ITERATE(e, compat_match_to_user, 
dstptr, size); + if (ret) + return ret; + watchers_offset = e->watchers_offset - (origsize - *size); + + ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size); + if (ret) + return ret; + target_offset = e->target_offset - (origsize - *size); + + t = (struct ebt_entry_target *) ((char *) e + e->target_offset); + + ret = compat_target_to_user(t, dstptr, size); + if (ret) + return ret; + next_offset = e->next_offset - (origsize - *size); + + if (put_user(watchers_offset, &ce->watchers_offset) || + put_user(target_offset, &ce->target_offset) || + put_user(next_offset, &ce->next_offset)) + return -EFAULT; + + *size -= sizeof(*ce); + return 0; +} + +static int compat_calc_match(struct ebt_entry_match *m, int *off) +{ + *off += ebt_compat_match_offset(m->u.match, m->match_size); + *off += ebt_compat_entry_padsize(); + return 0; +} + +static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off) +{ + *off += xt_compat_target_offset(w->u.watcher); + *off += ebt_compat_entry_padsize(); + return 0; +} + +static int compat_calc_entry(const struct ebt_entry *e, + const struct ebt_table_info *info, + const void *base, + struct compat_ebt_replace *newinfo) +{ + const struct ebt_entry_target *t; + unsigned int entry_offset; + int off, ret, i; + + if (e->bitmask == 0) + return 0; + + off = 0; + entry_offset = (void *)e - base; + + EBT_MATCH_ITERATE(e, compat_calc_match, &off); + EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); + + t = (const struct ebt_entry_target *) ((char *) e + e->target_offset); + + off += xt_compat_target_offset(t->u.target); + off += ebt_compat_entry_padsize(); + + newinfo->entries_size -= off; + + ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off); + if (ret) + return ret; + + for (i = 0; i < NF_BR_NUMHOOKS; i++) { + const void *hookptr = info->hook_entry[i]; + if (info->hook_entry[i] && + (e < (struct ebt_entry *)(base - hookptr))) { + newinfo->hook_entry[i] -= off; + pr_debug("0x%08X -> 0x%08X\n", + newinfo->hook_entry[i] + off, + newinfo->hook_entry[i]); + } + } + + return 0; +} + + +static int compat_table_info(const struct ebt_table_info *info, + struct compat_ebt_replace *newinfo) +{ + unsigned int size = info->entries_size; + const void *entries = info->entries; + + newinfo->entries_size = size; + + return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, + entries, newinfo); +} + +static int compat_copy_everything_to_user(struct ebt_table *t, + void __user *user, int *len, int cmd) +{ + struct compat_ebt_replace repl, tmp; + struct ebt_counter *oldcounters; + struct ebt_table_info tinfo; + int ret; + void __user *pos; + + memset(&tinfo, 0, sizeof(tinfo)); + + if (cmd == EBT_SO_GET_ENTRIES) { + tinfo.entries_size = t->private->entries_size; + tinfo.nentries = t->private->nentries; + tinfo.entries = t->private->entries; + oldcounters = t->private->counters; + } else { + tinfo.entries_size = t->table->entries_size; + tinfo.nentries = t->table->nentries; + tinfo.entries = t->table->entries; + oldcounters = t->table->counters; + } + + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + + if (tmp.nentries != tinfo.nentries || + (tmp.num_counters && tmp.num_counters != tinfo.nentries)) + return -EINVAL; + + memcpy(&repl, &tmp, sizeof(repl)); + if (cmd == EBT_SO_GET_ENTRIES) + ret = compat_table_info(t->private, &repl); + else + ret = compat_table_info(&tinfo, &repl); + if (ret) + return ret; + + if (*len != sizeof(tmp) + repl.entries_size + + (tmp.num_counters? 
tinfo.nentries * sizeof(struct ebt_counter): 0)) { + pr_err("wrong size: *len %d, entries_size %u, replsz %d\n", + *len, tinfo.entries_size, repl.entries_size); + return -EINVAL; + } + + /* userspace might not need the counters */ + ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters), + tmp.num_counters, tinfo.nentries); + if (ret) + return ret; + + pos = compat_ptr(tmp.entries); + return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, + compat_copy_entry_to_user, &pos, &tmp.entries_size); +} + +struct ebt_entries_buf_state { + char *buf_kern_start; /* kernel buffer to copy (translated) data to */ + u32 buf_kern_len; /* total size of kernel buffer */ + u32 buf_kern_offset; /* amount of data copied so far */ + u32 buf_user_offset; /* read position in userspace buffer */ +}; + +static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) +{ + state->buf_kern_offset += sz; + return state->buf_kern_offset >= sz ? 0 : -EINVAL; +} + +static int ebt_buf_add(struct ebt_entries_buf_state *state, + void *data, unsigned int sz) +{ + if (state->buf_kern_start == NULL) + goto count_only; + + BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); + + memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); + + count_only: + state->buf_user_offset += sz; + return ebt_buf_count(state, sz); +} + +static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) +{ + char *b = state->buf_kern_start; + + BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); + + if (b != NULL && sz > 0) + memset(b + state->buf_kern_offset, 0, sz); + /* do not adjust ->buf_user_offset here, we added kernel-side padding */ + return ebt_buf_count(state, sz); +} + +enum compat_mwt { + EBT_COMPAT_MATCH, + EBT_COMPAT_WATCHER, + EBT_COMPAT_TARGET, +}; + +static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, + enum compat_mwt compat_mwt, + struct ebt_entries_buf_state *state, + const unsigned char *base) +{ + char name[EBT_FUNCTION_MAXNAMELEN]; + struct xt_match *match; + struct xt_target *wt; + void *dst = NULL; + int off, pad = 0, ret = 0; + unsigned int size_kern, entry_offset, match_size = mwt->match_size; + + strlcpy(name, mwt->u.name, sizeof(name)); + + if (state->buf_kern_start) + dst = state->buf_kern_start + state->buf_kern_offset; + + entry_offset = (unsigned char *) mwt - base; + switch (compat_mwt) { + case EBT_COMPAT_MATCH: + match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE, + name, 0), "ebt_%s", name); + if (match == NULL) + return -ENOENT; + if (IS_ERR(match)) + return PTR_ERR(match); + + off = ebt_compat_match_offset(match, match_size); + if (dst) { + if (match->compat_from_user) + match->compat_from_user(dst, mwt->data); + else + memcpy(dst, mwt->data, match_size); + } + + size_kern = match->matchsize; + if (unlikely(size_kern == -1)) + size_kern = match_size; + module_put(match->me); + break; + case EBT_COMPAT_WATCHER: /* fallthrough */ + case EBT_COMPAT_TARGET: + wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE, + name, 0), "ebt_%s", name); + if (wt == NULL) + return -ENOENT; + if (IS_ERR(wt)) + return PTR_ERR(wt); + off = xt_compat_target_offset(wt); + + if (dst) { + if (wt->compat_from_user) + wt->compat_from_user(dst, mwt->data); + else + memcpy(dst, mwt->data, match_size); + } + + size_kern = wt->targetsize; + module_put(wt->me); + break; + } + + if (!dst) { + ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, + off + ebt_compat_entry_padsize()); + if (ret < 0) + return ret; + } + + 
state->buf_kern_offset += match_size + off; + state->buf_user_offset += match_size; + pad = XT_ALIGN(size_kern) - size_kern; + + if (pad > 0 && dst) { + BUG_ON(state->buf_kern_len <= pad); + BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); + memset(dst + size_kern, 0, pad); + } + return off + match_size; +} + +/* + * return size of all matches, watchers or target, including necessary + * alignment and padding. + */ +static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, + unsigned int size_left, enum compat_mwt type, + struct ebt_entries_buf_state *state, const void *base) +{ + int growth = 0; + char *buf; + + if (size_left == 0) + return 0; + + buf = (char *) match32; + + while (size_left >= sizeof(*match32)) { + struct ebt_entry_match *match_kern; + int ret; + + match_kern = (struct ebt_entry_match *) state->buf_kern_start; + if (match_kern) { + char *tmp; + tmp = state->buf_kern_start + state->buf_kern_offset; + match_kern = (struct ebt_entry_match *) tmp; + } + ret = ebt_buf_add(state, buf, sizeof(*match32)); + if (ret < 0) + return ret; + size_left -= sizeof(*match32); + + /* add padding before match->data (if any) */ + ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); + if (ret < 0) + return ret; + + if (match32->match_size > size_left) + return -EINVAL; + + size_left -= match32->match_size; + + ret = compat_mtw_from_user(match32, type, state, base); + if (ret < 0) + return ret; + + BUG_ON(ret < match32->match_size); + growth += ret - match32->match_size; + growth += ebt_compat_entry_padsize(); + + buf += sizeof(*match32); + buf += match32->match_size; + + if (match_kern) + match_kern->match_size = ret; + + WARN_ON(type == EBT_COMPAT_TARGET && size_left); + match32 = (struct compat_ebt_entry_mwt *) buf; + } + + return growth; +} + +#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct compat_ebt_entry_mwt *__watcher; \ + \ + for (__i = e->watchers_offset; \ + __i < (e)->target_offset; \ + __i += __watcher->watcher_size + \ + sizeof(struct compat_ebt_entry_mwt)) { \ + __watcher = (void *)(e) + __i; \ + __ret = fn(__watcher , ## args); \ + if (__ret != 0) \ + break; \ + } \ + if (__ret == 0) { \ + if (__i != (e)->target_offset) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + +#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct compat_ebt_entry_mwt *__match; \ + \ + for (__i = sizeof(struct ebt_entry); \ + __i < (e)->watchers_offset; \ + __i += __match->match_size + \ + sizeof(struct compat_ebt_entry_mwt)) { \ + __match = (void *)(e) + __i; \ + __ret = fn(__match , ## args); \ + if (__ret != 0) \ + break; \ + } \ + if (__ret == 0) { \ + if (__i != (e)->watchers_offset) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + +/* called for all ebt_entry structures. 
*/ +static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, + unsigned int *total, + struct ebt_entries_buf_state *state) +{ + unsigned int i, j, startoff, new_offset = 0; + /* stores match/watchers/targets & offset of next struct ebt_entry: */ + unsigned int offsets[4]; + unsigned int *offsets_update = NULL; + int ret; + char *buf_start; + + if (*total < sizeof(struct ebt_entries)) + return -EINVAL; + + if (!entry->bitmask) { + *total -= sizeof(struct ebt_entries); + return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); + } + if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) + return -EINVAL; + + startoff = state->buf_user_offset; + /* pull in most part of ebt_entry, it does not need to be changed. */ + ret = ebt_buf_add(state, entry, + offsetof(struct ebt_entry, watchers_offset)); + if (ret < 0) + return ret; + + offsets[0] = sizeof(struct ebt_entry); /* matches come first */ + memcpy(&offsets[1], &entry->watchers_offset, + sizeof(offsets) - sizeof(offsets[0])); + + if (state->buf_kern_start) { + buf_start = state->buf_kern_start + state->buf_kern_offset; + offsets_update = (unsigned int *) buf_start; + } + ret = ebt_buf_add(state, &offsets[1], + sizeof(offsets) - sizeof(offsets[0])); + if (ret < 0) + return ret; + buf_start = (char *) entry; + /* + * 0: matches offset, always follows ebt_entry. + * 1: watchers offset, from ebt_entry structure + * 2: target offset, from ebt_entry structure + * 3: next ebt_entry offset, from ebt_entry structure + * + * offsets are relative to beginning of struct ebt_entry (i.e., 0). + */ + for (i = 0, j = 1 ; j < 4 ; j++, i++) { + struct compat_ebt_entry_mwt *match32; + unsigned int size; + char *buf = buf_start; + + buf = buf_start + offsets[i]; + if (offsets[i] > offsets[j]) + return -EINVAL; + + match32 = (struct compat_ebt_entry_mwt *) buf; + size = offsets[j] - offsets[i]; + ret = ebt_size_mwt(match32, size, i, state, base); + if (ret < 0) + return ret; + new_offset += ret; + if (offsets_update && new_offset) { + pr_debug("ebtables: change offset %d to %d\n", + offsets_update[i], offsets[j] + new_offset); + offsets_update[i] = offsets[j] + new_offset; + } + } + + startoff = state->buf_user_offset - startoff; + + BUG_ON(*total < startoff); + *total -= startoff; + return 0; +} + +/* + * repl->entries_size is the size of the ebt_entry blob in userspace. + * It might need more memory when copied to a 64 bit kernel in case + * userspace is 32-bit. So, first task: find out how much memory is needed. + * + * Called before validation is performed. 
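Put differently, translation is two passes over the same 32-bit blob: compat_copy_entries() (just below) first runs with buf_kern_start == NULL so ebt_buf_add() only counts, then runs again against a kernel buffer of exactly the size the first pass reported. A hypothetical driver condensing what compat_do_replace() does further down; the xt_compat_lock()/jump-offset bookkeeping of the real code is omitted:

	static void *translate_compat_blob(unsigned char *data32,
					   unsigned int len32, int *len64)
	{
		struct ebt_entries_buf_state state;
		void *buf64;
		int sz;

		/* pass 1: buf_kern_start == NULL, sizes only */
		memset(&state, 0, sizeof(state));
		sz = compat_copy_entries(data32, len32, &state);
		if (sz < 0)
			return NULL;

		buf64 = vmalloc(sz);
		if (!buf64)
			return NULL;

		/* pass 2: same walk, now filling the 64-bit buffer */
		memset(&state, 0, sizeof(state));
		state.buf_kern_start = buf64;
		state.buf_kern_len = sz;
		if (compat_copy_entries(data32, len32, &state) < 0) {
			vfree(buf64);
			return NULL;
		}

		*len64 = sz;
		return buf64;
	}
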
+ */ +static int compat_copy_entries(unsigned char *data, unsigned int size_user, + struct ebt_entries_buf_state *state) +{ + unsigned int size_remaining = size_user; + int ret; + + ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data, + &size_remaining, state); + if (ret < 0) + return ret; + + WARN_ON(size_remaining); + return state->buf_kern_offset; +} + + +static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, + void __user *user, unsigned int len) +{ + struct compat_ebt_replace tmp; + int i; + + if (len < sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + + if (len != sizeof(tmp) + tmp.entries_size) + return -EINVAL; + + if (tmp.entries_size == 0) + return -EINVAL; + + if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / + NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) + return -ENOMEM; + if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) + return -ENOMEM; + + memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); + + /* starting with hook_entry, 32 vs. 64 bit structures are different */ + for (i = 0; i < NF_BR_NUMHOOKS; i++) + repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]); + + repl->num_counters = tmp.num_counters; + repl->counters = compat_ptr(tmp.counters); + repl->entries = compat_ptr(tmp.entries); + return 0; +} + +static int compat_do_replace(struct net *net, void __user *user, + unsigned int len) +{ + int ret, i, countersize, size64; + struct ebt_table_info *newinfo; + struct ebt_replace tmp; + struct ebt_entries_buf_state state; + void *entries_tmp; + + ret = compat_copy_ebt_replace_from_user(&tmp, user, len); + if (ret) { + /* try real handler in case userland supplied needed padding */ + if (ret == -EINVAL && do_replace(net, user, len) == 0) + ret = 0; + return ret; + } + + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; + newinfo = vmalloc(sizeof(*newinfo) + countersize); + if (!newinfo) + return -ENOMEM; + + if (countersize) + memset(newinfo->counters, 0, countersize); + + memset(&state, 0, sizeof(state)); + + newinfo->entries = vmalloc(tmp.entries_size); + if (!newinfo->entries) { + ret = -ENOMEM; + goto free_newinfo; + } + if (copy_from_user( + newinfo->entries, tmp.entries, tmp.entries_size) != 0) { + ret = -EFAULT; + goto free_entries; + } + + entries_tmp = newinfo->entries; + + xt_compat_lock(NFPROTO_BRIDGE); + + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); + if (ret < 0) + goto out_unlock; + + pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n", + tmp.entries_size, state.buf_kern_offset, state.buf_user_offset, + xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size)); + + size64 = ret; + newinfo->entries = vmalloc(size64); + if (!newinfo->entries) { + vfree(entries_tmp); + ret = -ENOMEM; + goto out_unlock; + } + + memset(&state, 0, sizeof(state)); + state.buf_kern_start = newinfo->entries; + state.buf_kern_len = size64; + + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); + BUG_ON(ret < 0); /* parses same data again */ + + vfree(entries_tmp); + tmp.entries_size = size64; + + for (i = 0; i < NF_BR_NUMHOOKS; i++) { + char __user *usrptr; + if (tmp.hook_entry[i]) { + unsigned int delta; + usrptr = (char __user *) tmp.hook_entry[i]; + delta = usrptr - tmp.entries; + usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta); + tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr; + } + } + + xt_compat_flush_offsets(NFPROTO_BRIDGE); + xt_compat_unlock(NFPROTO_BRIDGE); + + ret = 
do_replace_finish(net, &tmp, newinfo); + if (ret == 0) + return ret; +free_entries: + vfree(newinfo->entries); +free_newinfo: + vfree(newinfo); + return ret; +out_unlock: + xt_compat_flush_offsets(NFPROTO_BRIDGE); + xt_compat_unlock(NFPROTO_BRIDGE); + goto free_entries; +} + +static int compat_update_counters(struct net *net, void __user *user, + unsigned int len) +{ + struct compat_ebt_replace hlp; + + if (copy_from_user(&hlp, user, sizeof(hlp))) + return -EFAULT; + + /* try real handler in case userland supplied needed padding */ + if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) + return update_counters(net, user, len); + + return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), + hlp.num_counters, user, len); +} + +static int compat_do_ebt_set_ctl(struct sock *sk, + int cmd, void __user *user, unsigned int len) +{ + int ret; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + switch (cmd) { + case EBT_SO_SET_ENTRIES: + ret = compat_do_replace(sock_net(sk), user, len); + break; + case EBT_SO_SET_COUNTERS: + ret = compat_update_counters(sock_net(sk), user, len); + break; + default: + ret = -EINVAL; + } + return ret; +} + +static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, + void __user *user, int *len) +{ + int ret; + struct compat_ebt_replace tmp; + struct ebt_table *t; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* try real handler in case userland supplied needed padding */ + if ((cmd == EBT_SO_GET_INFO || + cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp)) + return do_ebt_get_ctl(sk, cmd, user, len); + + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + + t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex); + if (!t) + return ret; + + xt_compat_lock(NFPROTO_BRIDGE); + switch (cmd) { + case EBT_SO_GET_INFO: + tmp.nentries = t->private->nentries; + ret = compat_table_info(t->private, &tmp); + if (ret) + goto out; + tmp.valid_hooks = t->valid_hooks; + + if (copy_to_user(user, &tmp, *len) != 0) { + ret = -EFAULT; + break; + } + ret = 0; + break; + case EBT_SO_GET_INIT_INFO: + tmp.nentries = t->table->nentries; + tmp.entries_size = t->table->entries_size; + tmp.valid_hooks = t->table->valid_hooks; + + if (copy_to_user(user, &tmp, *len) != 0) { + ret = -EFAULT; + break; + } + ret = 0; + break; + case EBT_SO_GET_ENTRIES: + case EBT_SO_GET_INIT_ENTRIES: + /* + * try real handler first in case of userland-side padding. + * in case we are dealing with an 'ordinary' 32 bit binary + * without 64bit compatibility padding, this will fail right + * after copy_from_user when the *len argument is validated. + * + * the compat_ variant needs to do one pass over the kernel + * data set to adjust for size differences before it can do the check. + */ + if (copy_everything_to_user(t, user, len, cmd) == 0) + ret = 0; + else + ret = compat_copy_everything_to_user(t, user, len, cmd); + break; + default: + ret = -EINVAL; + } + out: + xt_compat_flush_offsets(NFPROTO_BRIDGE); + xt_compat_unlock(NFPROTO_BRIDGE); + mutex_unlock(&ebt_mutex); + return ret; +} +#endif + static struct nf_sockopt_ops ebt_sockopts = { .pf = PF_INET, .set_optmin = EBT_BASE_CTL, .set_optmax = EBT_SO_SET_MAX + 1, .set = do_ebt_set_ctl, +#ifdef CONFIG_COMPAT + .compat_set = compat_do_ebt_set_ctl, +#endif .get_optmin = EBT_BASE_CTL, .get_optmax = EBT_SO_GET_MAX + 1, .get = do_ebt_get_ctl, +#ifdef CONFIG_COMPAT + .compat_get = compat_do_ebt_get_ctl, +#endif .owner = THIS_MODULE, };
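A closing cross-check on the compat padding: the value ebt_compat_entry_padsize() computes falls out of the alignment rules, since the shared match/watcher/target header holds a pointer-sized union natively but a compat_uptr_t for 32-bit userland, and XT_ALIGN() rounds to 8 where COMPAT_XT_ALIGN() rounds to 4. A userspace re-derivation (a sketch assuming the usual x86-64/ia32 alignments and the 32-byte EBT_FUNCTION_MAXNAMELEN):

	#include <stdio.h>
	#include <stddef.h>

	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((size_t)(a) - 1))
	#define XT_ALIGN(s)		ALIGN((s), 8)	/* _xt_align on 64-bit */
	#define COMPAT_XT_ALIGN(s)	ALIGN((s), 4)	/* 32-bit equivalent */

	int main(void)
	{
		/* name[32]/pointer union plus match_size, before ->data */
		size_t native_hdr = 32 + 4;	/* XT_ALIGN pads this to 40 */
		size_t compat_hdr = 32 + 4;	/* compat_uptr_t keeps it at 36 */

		printf("padsize = %zu\n",
		       XT_ALIGN(native_hdr) - COMPAT_XT_ALIGN(compat_hdr));
		return 0;	/* prints: padsize = 4 */
	}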