Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile          |    1
-rw-r--r--  net/core/dev.c             |  153
-rw-r--r--  net/core/filter.c          | 1567
-rw-r--r--  net/core/flow.c            |  132
-rw-r--r--  net/core/flow_dissector.c  |   24
-rw-r--r--  net/core/neighbour.c       |    9
-rw-r--r--  net/core/net-sysfs.c       |   22
-rw-r--r--  net/core/netpoll.c         |  587
-rw-r--r--  net/core/pktgen.c          |   32
-rw-r--r--  net/core/ptp_classifier.c  |  141
-rw-r--r--  net/core/request_sock.c    |    1
-rw-r--r--  net/core/rtnetlink.c       |  113
-rw-r--r--  net/core/skbuff.c          |  166
-rw-r--r--  net/core/sock_diag.c       |   23
-rw-r--r--  net/core/timestamping.c    |   19
15 files changed, 1813 insertions, 1177 deletions
diff --git a/net/core/Makefile b/net/core/Makefile
index 9628c20acff..826b925aa45 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
+obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 4a91591b30a..757063420ce 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1245,7 +1245,7 @@ static int __dev_open(struct net_device *dev)
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
-	netpoll_rx_disable(dev);
+	netpoll_poll_disable(dev);
 
 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 	ret = notifier_to_errno(ret);
@@ -1260,7 +1260,7 @@ static int __dev_open(struct net_device *dev)
 	if (!ret && ops->ndo_open)
 		ret = ops->ndo_open(dev);
 
-	netpoll_rx_enable(dev);
+	netpoll_poll_enable(dev);
 
 	if (ret)
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
 	might_sleep();
 
 	list_for_each_entry(dev, head, close_list) {
+		/* Temporarily disable netpoll until the interface is down */
+		netpoll_poll_disable(dev);
+
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
 		dev->flags &= ~IFF_UP;
 		net_dmaengine_put();
+		netpoll_poll_enable(dev);
 	}
 
 	return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
-	/* Temporarily disable netpoll until the interface is down */
-	netpoll_rx_disable(dev);
-
 	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
-	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
-		/* Block netpoll rx while the interface is going down */
-		netpoll_rx_disable(dev);
-
 		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
-
-		netpoll_rx_enable(dev);
 	}
 	return 0;
 }
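Note on the hunks above: the quiesce/resume pair moves into __dev_close_many(), so every shutdown path (dev_close(), dev_close_many(), __dev_close()) now disables the netpoll poller before NETDEV_GOING_DOWN fires, matching the comment in __dev_open() about ndo_poll_controller racing with the state change. A minimal userspace sketch of that pattern, with every name (pollable_dev, poll_mutex, dev_change_state) invented for illustration; the real kernel primitives are netpoll_poll_disable()/netpoll_poll_enable():

/* Sketch: quiesce an async poller around a device state change.
 * All identifiers here are hypothetical, for illustration only.
 */
#include <pthread.h>
#include <stdbool.h>

struct pollable_dev {
	pthread_mutex_t poll_mutex;	/* held by the poller while it runs */
	bool up;
};

static void poll_disable(struct pollable_dev *dev)
{
	/* Blocks until any in-flight poll completes, then keeps
	 * new polls out until poll_enable() is called. */
	pthread_mutex_lock(&dev->poll_mutex);
}

static void poll_enable(struct pollable_dev *dev)
{
	pthread_mutex_unlock(&dev->poll_mutex);
}

static void dev_change_state(struct pollable_dev *dev, bool up)
{
	poll_disable(dev);	/* the poller cannot run concurrently ... */
	dev->up = up;		/* ... while the device changes state */
	poll_enable(dev);
}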
@@ -3019,7 +3016,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	skb_reset_network_header(skb);
-	if (!skb_get_hash(skb))
+	hash = skb_get_hash(skb);
+	if (!hash)
 		goto done;
 
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@ -3028,11 +3026,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		u16 next_cpu;
 		struct rps_dev_flow *rflow;
 
-		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
+		rflow = &flow_table->flows[hash & flow_table->mask];
 		tcpu = rflow->cpu;
 
-		next_cpu = sock_flow_table->ents[skb->rxhash &
-		    sock_flow_table->mask];
+		next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
 
 		/*
 		 * If the desired CPU (where last recvmsg was done) is
@@ -3061,7 +3058,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	if (map) {
-		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+		tcpu = map->cpus[((u64) hash * map->len) >> 32];
 
 		if (cpu_online(tcpu)) {
 			cpu = tcpu;
@@ -3236,10 +3233,6 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	/* if netpoll wants it, pretend we never saw it */
-	if (netpoll_rx(skb))
-		return NET_RX_DROP;
-
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
@@ -3500,11 +3493,11 @@ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
-	case __constant_htons(ETH_P_ARP):
-	case __constant_htons(ETH_P_IP):
-	case __constant_htons(ETH_P_IPV6):
-	case __constant_htons(ETH_P_8021Q):
-	case __constant_htons(ETH_P_8021AD):
+	case htons(ETH_P_ARP):
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+	case htons(ETH_P_8021Q):
+	case htons(ETH_P_8021AD):
 		return true;
 	default:
 		return false;
@@ -3525,10 +3518,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	trace_netif_receive_skb(skb);
 
-	/* if we've gotten here through NAPI, check netpoll */
-	if (netpoll_receive_skb(skb))
-		goto out;
-
 	orig_dev = skb->dev;
 
 	skb_reset_network_header(skb);
@@ -3655,7 +3644,6 @@ drop:
 
 unlock:
 	rcu_read_unlock();
-out:
 	return ret;
 }
 
@@ -3845,10 +3833,10 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
 		if (maclen == ETH_HLEN)
 			diffs |= compare_ether_header(skb_mac_header(p),
-						      skb_gro_mac_header(skb));
+						      skb_mac_header(skb));
 		else if (!diffs)
 			diffs = memcmp(skb_mac_header(p),
-				       skb_gro_mac_header(skb),
+				       skb_mac_header(skb),
 				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 	}
@@ -3871,6 +3859,27 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
 	}
 }
 
+static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
+{
+	struct skb_shared_info *pinfo = skb_shinfo(skb);
+
+	BUG_ON(skb->end - skb->tail < grow);
+
+	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+	skb->data_len -= grow;
+	skb->tail += grow;
+
+	pinfo->frags[0].page_offset += grow;
+	skb_frag_size_sub(&pinfo->frags[0], grow);
+
+	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
+		skb_frag_unref(skb, 0);
+		memmove(pinfo->frags, pinfo->frags + 1,
+			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
+	}
+}
+
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -3879,14 +3888,14 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	struct list_head *head = &offload_base;
 	int same_flow;
 	enum gro_result ret;
+	int grow;
 
-	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
+	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
 		goto normal;
 
-	skb_gro_reset_offset(skb);
 	gro_list_prepare(napi, skb);
 	NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
 
@@ -3950,27 +3959,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	ret = GRO_HELD;
 
 pull:
-	if (skb_headlen(skb) < skb_gro_offset(skb)) {
-		int grow = skb_gro_offset(skb) - skb_headlen(skb);
-
-		BUG_ON(skb->end - skb->tail < grow);
-
-		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
-
-		skb->tail += grow;
-		skb->data_len -= grow;
-
-		skb_shinfo(skb)->frags[0].page_offset += grow;
-		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
-
-		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
-			skb_frag_unref(skb, 0);
-			memmove(skb_shinfo(skb)->frags,
-				skb_shinfo(skb)->frags + 1,
-				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
-		}
-	}
-
+	grow = skb_gro_offset(skb) - skb_headlen(skb);
+	if (grow > 0)
+		gro_pull_from_frag0(skb, grow);
 ok:
 	return ret;
 
@@ -4038,6 +4029,8 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	trace_napi_gro_receive_entry(skb);
 
+	skb_gro_reset_offset(skb);
+
 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -4066,12 +4059,16 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
-			       gro_result_t ret)
+static gro_result_t napi_frags_finish(struct napi_struct *napi,
+				      struct sk_buff *skb,
+				      gro_result_t ret)
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		if (netif_receive_skb_internal(skb))
+	case GRO_HELD:
+		__skb_push(skb, ETH_HLEN);
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
 			ret = GRO_DROP;
 		break;
 
@@ -4080,7 +4077,6 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 		napi_reuse_skb(napi, skb);
 		break;
 
-	case GRO_HELD:
 	case GRO_MERGED:
 		break;
 	}
@@ -4088,17 +4084,41 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 	return ret;
 }
 
+/* Upper GRO stack assumes network header starts at gro_offset=0
+ * Drivers could call both napi_gro_frags() and napi_gro_receive()
+ * We copy ethernet header into skb->data to have a common layout.
+ */
 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
 	struct sk_buff *skb = napi->skb;
+	const struct ethhdr *eth;
+	unsigned int hlen = sizeof(*eth);
 
 	napi->skb = NULL;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
-		napi_reuse_skb(napi, skb);
-		return NULL;
+	skb_reset_mac_header(skb);
+	skb_gro_reset_offset(skb);
+
+	eth = skb_gro_header_fast(skb, 0);
+	if (unlikely(skb_gro_header_hard(skb, hlen))) {
+		eth = skb_gro_header_slow(skb, hlen, 0);
+		if (unlikely(!eth)) {
+			napi_reuse_skb(napi, skb);
+			return NULL;
+		}
+	} else {
+		gro_pull_from_frag0(skb, hlen);
+		NAPI_GRO_CB(skb)->frag0 += hlen;
+		NAPI_GRO_CB(skb)->frag0_len -= hlen;
 	}
-	skb->protocol = eth_type_trans(skb, skb->dev);
+	__skb_pull(skb, hlen);
+
+	/*
+	 * This works because the only protocols we care about don't require
+	 * special handling.
+	 * We'll fix it up properly in napi_frags_finish()
+	 */
+	skb->protocol = eth->h_proto;
 
 	return skb;
 }
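Note on napi_frags_skb(): it now pulls the ethernet header out of frag0 itself and defers the full eth_type_trans() to napi_frags_finish(), so GRO sees the network header at gro_offset 0 through both entry points. For context, this is roughly how a driver feeds this path; a hedged sketch using the real napi_get_frags()/skb_fill_page_desc()/napi_gro_frags() API, with rx_page/offset/len made up and DMA plus error handling elided:

/* Sketch of the napi_gro_frags() driver pattern: the skb head stays
 * empty and the whole frame (ethernet header included) is attached
 * as a page fragment, which is why napi_frags_skb() must pull the
 * header from frag0.
 */
static void rx_one_frame(struct napi_struct *napi, struct page *rx_page,
			 unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failure: drop the frame */

	skb_fill_page_desc(skb, 0, rx_page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* ends up in napi_frags_skb() above */
}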
@@ -6251,6 +6271,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
+	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
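The filter.c rewrite below replaces the classic two-register BPF interpreter with the new internal instruction set. For orientation, this is the kind of classic BPF program that sk_filter() runs on the receive path; a minimal, self-contained userspace example attaching an "IPv4 only" filter to a packet socket via the standard SO_ATTACH_FILTER socket option (uapi usage, not taken from this diff):

/* Classic BPF: accept IPv4 frames, drop everything else. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	struct sock_filter code[] = {
		/* A = ethertype (halfword at offset 12 of the frame) */
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
		/* if (A == ETH_P_IP) accept up to 0xffff bytes, else drop */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("socket/setsockopt");
		return 1;
	}
	/* recv() on fd now only ever sees IPv4 frames. */
	return 0;
}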
diff --git a/net/core/filter.c b/net/core/filter.c
index ad30d626a5b..765556ba32e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1,11 +1,16 @@
 /*
  * Linux Socket Filter - Kernel level socket filtering
  *
- * Author:
- *	Jay Schulist <jschlst@samba.org>
+ * Based on the design of the Berkeley Packet Filter. The new
+ * internal format has been designed by PLUMgrid:
  *
- * Based on the design of:
- *	- The Berkeley Packet Filter
+ *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
+ *
+ * Authors:
+ *
+ *	Jay Schulist <jschlst@samba.org>
+ *	Alexei Starovoitov <ast@plumgrid.com>
+ *	Daniel Borkmann <dborkman@redhat.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -108,304 +113,1045 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sk_filter);
 
+/* Base function for offset calculation. Needs to go into .text section,
+ * therefore keeping it non-static as well; will also be used by JITs
+ * anyway later on, so do not let the compiler omit it.
+ */
+noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	return 0;
+}
+
 /**
- *	sk_run_filter - run a filter on a socket
- *	@skb: buffer to run the filter on
- *	@fentry: filter to apply
+ *	__sk_run_filter - run a filter on a given context
+ *	@ctx: buffer to run the filter on
+ *	@insn: filter to apply
  *
- *	Decode and apply filter instructions to the skb->data.
- *	Return length to keep, 0 for none. @skb is the data we are
- *	filtering, @filter is the array of filter instructions.
- *	Because all jumps are guaranteed to be before last instruction,
- *	and last instruction guaranteed to be a RET, we dont need to check
- *	flen. (We used to pass to this function the length of filter)
+ * Decode and apply filter instructions to the skb->data. Return length to
+ * keep, 0 for none. @ctx is the data we are operating on, @insn is the
+ * array of filter instructions.
  */
-unsigned int sk_run_filter(const struct sk_buff *skb,
-			   const struct sock_filter *fentry)
+unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 {
+	u64 stack[MAX_BPF_STACK / sizeof(u64)];
+	u64 regs[MAX_BPF_REG], tmp;
 	void *ptr;
-	u32 A = 0;			/* Accumulator */
-	u32 X = 0;			/* Index Register */
-	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
-	u32 tmp;
-	int k;
+	int off;
 
-	/*
-	 * Process array of filter instructions.
-	 */
-	for (;; fentry++) {
-#if defined(CONFIG_X86_32)
-#define K (fentry->k)
-#else
-		const u32 K = fentry->k;
-#endif
-
-		switch (fentry->code) {
-		case BPF_S_ALU_ADD_X:
-			A += X;
-			continue;
-		case BPF_S_ALU_ADD_K:
-			A += K;
-			continue;
-		case BPF_S_ALU_SUB_X:
-			A -= X;
-			continue;
-		case BPF_S_ALU_SUB_K:
-			A -= K;
-			continue;
-		case BPF_S_ALU_MUL_X:
-			A *= X;
-			continue;
-		case BPF_S_ALU_MUL_K:
-			A *= K;
-			continue;
-		case BPF_S_ALU_DIV_X:
-			if (X == 0)
-				return 0;
-			A /= X;
-			continue;
-		case BPF_S_ALU_DIV_K:
-			A /= K;
-			continue;
-		case BPF_S_ALU_MOD_X:
-			if (X == 0)
-				return 0;
-			A %= X;
-			continue;
-		case BPF_S_ALU_MOD_K:
-			A %= K;
-			continue;
-		case BPF_S_ALU_AND_X:
-			A &= X;
-			continue;
-		case BPF_S_ALU_AND_K:
-			A &= K;
-			continue;
-		case BPF_S_ALU_OR_X:
-			A |= X;
-			continue;
-		case BPF_S_ALU_OR_K:
-			A |= K;
-			continue;
-		case BPF_S_ANC_ALU_XOR_X:
-		case BPF_S_ALU_XOR_X:
-			A ^= X;
-			continue;
-		case BPF_S_ALU_XOR_K:
-			A ^= K;
-			continue;
-		case BPF_S_ALU_LSH_X:
-			A <<= X;
-			continue;
-		case BPF_S_ALU_LSH_K:
-			A <<= K;
-			continue;
-		case BPF_S_ALU_RSH_X:
-			A >>= X;
-			continue;
-		case BPF_S_ALU_RSH_K:
-			A >>= K;
-			continue;
-		case BPF_S_ALU_NEG:
-			A = -A;
-			continue;
-		case BPF_S_JMP_JA:
-			fentry += K;
-			continue;
-		case BPF_S_JMP_JGT_K:
-			fentry += (A > K) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JGE_K:
-			fentry += (A >= K) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JEQ_K:
-			fentry += (A == K) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JSET_K:
-			fentry += (A & K) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JGT_X:
-			fentry += (A > X) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JGE_X:
-			fentry += (A >= X) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JEQ_X:
-			fentry += (A == X) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_JMP_JSET_X:
-			fentry += (A & X) ? fentry->jt : fentry->jf;
-			continue;
-		case BPF_S_LD_W_ABS:
-			k = K;
-load_w:
-			ptr = load_pointer(skb, k, 4, &tmp);
-			if (ptr != NULL) {
-				A = get_unaligned_be32(ptr);
-				continue;
-			}
-			return 0;
-		case BPF_S_LD_H_ABS:
-			k = K;
-load_h:
-			ptr = load_pointer(skb, k, 2, &tmp);
-			if (ptr != NULL) {
-				A = get_unaligned_be16(ptr);
-				continue;
+#define K	insn->imm
+#define A	regs[insn->a_reg]
+#define X	regs[insn->x_reg]
+#define R0	regs[0]
+
+#define CONT	 ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
+
+	static const void *jumptable[256] = {
+		[0 ... 255] = &&default_label,
+		/* Now overwrite non-defaults ... */
+#define DL(A, B, C)	[A|B|C] = &&A##_##B##_##C
+		DL(BPF_ALU, BPF_ADD, BPF_X),
+		DL(BPF_ALU, BPF_ADD, BPF_K),
+		DL(BPF_ALU, BPF_SUB, BPF_X),
+		DL(BPF_ALU, BPF_SUB, BPF_K),
+		DL(BPF_ALU, BPF_AND, BPF_X),
+		DL(BPF_ALU, BPF_AND, BPF_K),
+		DL(BPF_ALU, BPF_OR, BPF_X),
+		DL(BPF_ALU, BPF_OR, BPF_K),
+		DL(BPF_ALU, BPF_LSH, BPF_X),
+		DL(BPF_ALU, BPF_LSH, BPF_K),
+		DL(BPF_ALU, BPF_RSH, BPF_X),
+		DL(BPF_ALU, BPF_RSH, BPF_K),
+		DL(BPF_ALU, BPF_XOR, BPF_X),
+		DL(BPF_ALU, BPF_XOR, BPF_K),
+		DL(BPF_ALU, BPF_MUL, BPF_X),
+		DL(BPF_ALU, BPF_MUL, BPF_K),
+		DL(BPF_ALU, BPF_MOV, BPF_X),
+		DL(BPF_ALU, BPF_MOV, BPF_K),
+		DL(BPF_ALU, BPF_DIV, BPF_X),
+		DL(BPF_ALU, BPF_DIV, BPF_K),
+		DL(BPF_ALU, BPF_MOD, BPF_X),
+		DL(BPF_ALU, BPF_MOD, BPF_K),
+		DL(BPF_ALU, BPF_NEG, 0),
+		DL(BPF_ALU, BPF_END, BPF_TO_BE),
+		DL(BPF_ALU, BPF_END, BPF_TO_LE),
+		DL(BPF_ALU64, BPF_ADD, BPF_X),
+		DL(BPF_ALU64, BPF_ADD, BPF_K),
+		DL(BPF_ALU64, BPF_SUB, BPF_X),
+		DL(BPF_ALU64, BPF_SUB, BPF_K),
+		DL(BPF_ALU64, BPF_AND, BPF_X),
+		DL(BPF_ALU64, BPF_AND, BPF_K),
+		DL(BPF_ALU64, BPF_OR, BPF_X),
+		DL(BPF_ALU64, BPF_OR, BPF_K),
+		DL(BPF_ALU64, BPF_LSH, BPF_X),
+		DL(BPF_ALU64, BPF_LSH, BPF_K),
+		DL(BPF_ALU64, BPF_RSH, BPF_X),
+		DL(BPF_ALU64, BPF_RSH, BPF_K),
+		DL(BPF_ALU64, BPF_XOR, BPF_X),
+		DL(BPF_ALU64, BPF_XOR, BPF_K),
+		DL(BPF_ALU64, BPF_MUL, BPF_X),
+		DL(BPF_ALU64, BPF_MUL, BPF_K),
+		DL(BPF_ALU64, BPF_MOV, BPF_X),
+		DL(BPF_ALU64, BPF_MOV, BPF_K),
+		DL(BPF_ALU64, BPF_ARSH, BPF_X),
+		DL(BPF_ALU64, BPF_ARSH, BPF_K),
+		DL(BPF_ALU64, BPF_DIV, BPF_X),
+		DL(BPF_ALU64, BPF_DIV, BPF_K),
+		DL(BPF_ALU64, BPF_MOD, BPF_X),
+		DL(BPF_ALU64, BPF_MOD, BPF_K),
+		DL(BPF_ALU64, BPF_NEG, 0),
+		DL(BPF_JMP, BPF_CALL, 0),
+		DL(BPF_JMP, BPF_JA, 0),
+		DL(BPF_JMP, BPF_JEQ, BPF_X),
+		DL(BPF_JMP, BPF_JEQ, BPF_K),
+		DL(BPF_JMP, BPF_JNE, BPF_X),
+		DL(BPF_JMP, BPF_JNE, BPF_K),
+		DL(BPF_JMP, BPF_JGT, BPF_X),
+		DL(BPF_JMP, BPF_JGT, BPF_K),
+		DL(BPF_JMP, BPF_JGE, BPF_X),
+		DL(BPF_JMP, BPF_JGE, BPF_K),
+		DL(BPF_JMP, BPF_JSGT, BPF_X),
+		DL(BPF_JMP, BPF_JSGT, BPF_K),
+		DL(BPF_JMP, BPF_JSGE, BPF_X),
+		DL(BPF_JMP, BPF_JSGE, BPF_K),
+		DL(BPF_JMP, BPF_JSET, BPF_X),
+		DL(BPF_JMP, BPF_JSET, BPF_K),
+		DL(BPF_JMP, BPF_EXIT, 0),
+		DL(BPF_STX, BPF_MEM, BPF_B),
+		DL(BPF_STX, BPF_MEM, BPF_H),
+		DL(BPF_STX, BPF_MEM, BPF_W),
+		DL(BPF_STX, BPF_MEM, BPF_DW),
+		DL(BPF_STX, BPF_XADD, BPF_W),
+		DL(BPF_STX, BPF_XADD, BPF_DW),
+		DL(BPF_ST, BPF_MEM, BPF_B),
+		DL(BPF_ST, BPF_MEM, BPF_H),
+		DL(BPF_ST, BPF_MEM, BPF_W),
+		DL(BPF_ST, BPF_MEM, BPF_DW),
+		DL(BPF_LDX, BPF_MEM, BPF_B),
+		DL(BPF_LDX, BPF_MEM, BPF_H),
+		DL(BPF_LDX, BPF_MEM, BPF_W),
+		DL(BPF_LDX, BPF_MEM, BPF_DW),
+		DL(BPF_LD, BPF_ABS, BPF_W),
+		DL(BPF_LD, BPF_ABS, BPF_H),
+		DL(BPF_LD, BPF_ABS, BPF_B),
+		DL(BPF_LD, BPF_IND, BPF_W),
+		DL(BPF_LD, BPF_IND, BPF_H),
+		DL(BPF_LD, BPF_IND, BPF_B),
+#undef DL
+	};
+
+	regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+	regs[ARG1_REG] = (u64) (unsigned long) ctx;
+
+select_insn:
+	goto *jumptable[insn->code];
+
+	/* ALU */
+#define ALU(OPCODE, OP)			\
+	BPF_ALU64_##OPCODE##_BPF_X:	\
+		A = A OP X;		\
+		CONT;			\
+	BPF_ALU_##OPCODE##_BPF_X:	\
+		A = (u32) A OP (u32) X;	\
+		CONT;			\
+	BPF_ALU64_##OPCODE##_BPF_K:	\
+		A = A OP K;		\
+		CONT;			\
+	BPF_ALU_##OPCODE##_BPF_K:	\
+		A = (u32) A OP (u32) K;	\
+		CONT;
+
+	ALU(BPF_ADD, +)
+	ALU(BPF_SUB, -)
+	ALU(BPF_AND, &)
+	ALU(BPF_OR, |)
+	ALU(BPF_LSH, <<)
+	ALU(BPF_RSH, >>)
+	ALU(BPF_XOR, ^)
+	ALU(BPF_MUL, *)
+#undef ALU
+	BPF_ALU_BPF_NEG_0:
+		A = (u32) -A;
+		CONT;
+	BPF_ALU64_BPF_NEG_0:
+		A = -A;
+		CONT;
+	BPF_ALU_BPF_MOV_BPF_X:
+		A = (u32) X;
+		CONT;
+	BPF_ALU_BPF_MOV_BPF_K:
+		A = (u32) K;
+		CONT;
+	BPF_ALU64_BPF_MOV_BPF_X:
+		A = X;
+		CONT;
+	BPF_ALU64_BPF_MOV_BPF_K:
+		A = K;
+		CONT;
+	BPF_ALU64_BPF_ARSH_BPF_X:
+		(*(s64 *) &A) >>= X;
+		CONT;
+	BPF_ALU64_BPF_ARSH_BPF_K:
+		(*(s64 *) &A) >>= K;
+		CONT;
+	BPF_ALU64_BPF_MOD_BPF_X:
+		tmp = A;
+		if (X)
+			A = do_div(tmp, X);
+		CONT;
+	BPF_ALU_BPF_MOD_BPF_X:
+		tmp = (u32) A;
+		if (X)
+			A = do_div(tmp, (u32) X);
+		CONT;
+	BPF_ALU64_BPF_MOD_BPF_K:
+		tmp = A;
+		if (K)
+			A = do_div(tmp, K);
+		CONT;
+	BPF_ALU_BPF_MOD_BPF_K:
+		tmp = (u32) A;
+		if (K)
+			A = do_div(tmp, (u32) K);
+		CONT;
+	BPF_ALU64_BPF_DIV_BPF_X:
+		if (X)
+			do_div(A, X);
+		CONT;
+	BPF_ALU_BPF_DIV_BPF_X:
+		tmp = (u32) A;
+		if (X)
+			do_div(tmp, (u32) X);
+		A = (u32) tmp;
+		CONT;
+	BPF_ALU64_BPF_DIV_BPF_K:
+		if (K)
+			do_div(A, K);
+		CONT;
+	BPF_ALU_BPF_DIV_BPF_K:
+		tmp = (u32) A;
+		if (K)
+			do_div(tmp, (u32) K);
+		A = (u32) tmp;
+		CONT;
+	BPF_ALU_BPF_END_BPF_TO_BE:
+		switch (K) {
+		case 16:
+			A = (__force u16) cpu_to_be16(A);
+			break;
+		case 32:
+			A = (__force u32) cpu_to_be32(A);
+			break;
+		case 64:
+			A = (__force u64) cpu_to_be64(A);
+			break;
+		}
+		CONT;
+	BPF_ALU_BPF_END_BPF_TO_LE:
+		switch (K) {
+		case 16:
+			A = (__force u16) cpu_to_le16(A);
+			break;
+		case 32:
+			A = (__force u32) cpu_to_le32(A);
+			break;
+		case 64:
+			A = (__force u64) cpu_to_le64(A);
+			break;
+		}
+		CONT;
+
+	/* CALL */
+	BPF_JMP_BPF_CALL_0:
+		/* Function call scratches R1-R5 registers, preserves R6-R9,
+		 * and stores return value into R0.
+		 */
+		R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
+						   regs[4], regs[5]);
+		CONT;
+
+	/* JMP */
+	BPF_JMP_BPF_JA_0:
+		insn += insn->off;
+		CONT;
+	BPF_JMP_BPF_JEQ_BPF_X:
+		if (A == X) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JEQ_BPF_K:
+		if (A == K) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JNE_BPF_X:
+		if (A != X) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JNE_BPF_K:
+		if (A != K) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JGT_BPF_X:
+		if (A > X) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JGT_BPF_K:
+		if (A > K) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JGE_BPF_X:
+		if (A >= X) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JGE_BPF_K:
+		if (A >= K) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JSGT_BPF_X:
+		if (((s64)A) > ((s64)X)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JSGT_BPF_K:
+		if (((s64)A) > ((s64)K)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JSGE_BPF_X:
+		if (((s64)A) >= ((s64)X)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JSGE_BPF_K:
+		if (((s64)A) >= ((s64)K)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JSET_BPF_X:
+		if (A & X) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_JSET_BPF_K:
+		if (A & K) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	BPF_JMP_BPF_EXIT_0:
+		return R0;
+
+	/* STX and ST and LDX*/
+#define LDST(SIZEOP, SIZE)					\
+	BPF_STX_BPF_MEM_##SIZEOP:				\
+		*(SIZE *)(unsigned long) (A + insn->off) = X;	\
+		CONT;						\
+	BPF_ST_BPF_MEM_##SIZEOP:				\
+		*(SIZE *)(unsigned long) (A + insn->off) = K;	\
+		CONT;						\
+	BPF_LDX_BPF_MEM_##SIZEOP:				\
+		A = *(SIZE *)(unsigned long) (X + insn->off);	\
+		CONT;
+
+	LDST(BPF_B,  u8)
+	LDST(BPF_H,  u16)
+	LDST(BPF_W,  u32)
+	LDST(BPF_DW, u64)
+#undef LDST
+	BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+		atomic_add((u32) X, (atomic_t *)(unsigned long)
+			   (A + insn->off));
+		CONT;
+	BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
+			     (A + insn->off));
+		CONT;
+	BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+		off = K;
+load_word:
+		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
+		 * appearing in the programs where ctx == skb. All programs
+		 * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter()
+		 * saves it in R6, internal BPF verifier will check that
+		 * R6 == ctx.
+		 *
+		 * BPF_ABS and BPF_IND are wrappers of function calls, so
+		 * they scratch R1-R5 registers, preserve R6-R9, and store
+		 * return value into R0.
+		 *
+		 * Implicit input:
+		 *   ctx
+		 *
+		 * Explicit input:
+		 *   X == any register
+		 *   K == 32-bit immediate
+		 *
+		 * Output:
+		 *   R0 - 8/16/32-bit skb data converted to cpu endianness
+		 */
+		ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
+		if (likely(ptr != NULL)) {
+			R0 = get_unaligned_be32(ptr);
+			CONT;
+		}
+		return 0;
+	BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+		off = K;
+load_half:
+		ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
+		if (likely(ptr != NULL)) {
+			R0 = get_unaligned_be16(ptr);
+			CONT;
+		}
+		return 0;
+	BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
+		off = K;
+load_byte:
+		ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
+		if (likely(ptr != NULL)) {
+			R0 = *(u8 *)ptr;
+			CONT;
+		}
+		return 0;
+	BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+		off = K + X;
+		goto load_word;
+	BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+		off = K + X;
+		goto load_half;
+	BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+		off = K + X;
+		goto load_byte;
+
+	default_label:
+		/* If we ever reach this, we have a bug somewhere. */
+		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+		return 0;
+#undef CONT_JMP
+#undef CONT
+
+#undef R0
+#undef X
+#undef A
+#undef K
+}
+
+u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
+			      const struct sock_filter_int *insni)
+    __attribute__ ((alias ("__sk_run_filter")));
+
+u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
+			  const struct sock_filter_int *insni)
+    __attribute__ ((alias ("__sk_run_filter")));
+EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
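Note on the interpreter above: instead of a switch, it dispatches through GCC's labels-as-values extension ("computed goto"), so each handler jumps straight to the next instruction's handler with no central loop. A freestanding sketch of that dispatch technique, with a toy instruction set invented for illustration (not the internal BPF opcodes):

/* Toy threaded interpreter using GCC's labels-as-values, the same
 * dispatch technique as __sk_run_filter. Compile with gcc.
 */
#include <stdio.h>

enum { OP_PUSH, OP_ADD, OP_PRINT, OP_HALT };

static long run(const int *prog)
{
	static const void *jumptable[] = {
		[OP_PUSH]  = &&do_push,
		[OP_ADD]   = &&do_add,
		[OP_PRINT] = &&do_print,
		[OP_HALT]  = &&do_halt,
	};
	long stack[16], *sp = stack;
	const int *pc = prog;

#define DISPATCH() goto *jumptable[*pc++]

	DISPATCH();
do_push:
	*sp++ = *pc++;		/* operand follows the opcode */
	DISPATCH();
do_add:
	sp[-2] += sp[-1];	/* fold the top two stack slots */
	sp--;
	DISPATCH();
do_print:
	printf("%ld\n", sp[-1]);
	DISPATCH();
do_halt:
	return sp[-1];
#undef DISPATCH
}

int main(void)
{
	const int prog[] = { OP_PUSH, 2, OP_PUSH, 40, OP_ADD,
			     OP_PRINT, OP_HALT };
	return run(prog) == 42 ? 0 : 1;	/* prints 42 */
}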
+/* Helper to find the offset of pkt_type in sk_buff structure. We want
+ * to make sure its still a 3bit field starting at a byte boundary;
+ * taken from arch/x86/net/bpf_jit_comp.c.
+ */
+#define PKT_TYPE_MAX 7
+static unsigned int pkt_type_offset(void)
+{
+	struct sk_buff skb_probe = { .pkt_type = ~0, };
+	u8 *ct = (u8 *) &skb_probe;
+	unsigned int off;
+
+	for (off = 0; off < sizeof(struct sk_buff); off++) {
+		if (ct[off] == PKT_TYPE_MAX)
+			return off;
+	}
+
+	pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
+	return -1;
+}
+
+static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+
+	return __skb_get_poff(skb);
+}
+
+static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+	struct nlattr *nla;
+
+	if (skb_is_nonlinear(skb))
+		return 0;
+
+	if (A > skb->len - sizeof(struct nlattr))
+		return 0;
+
+	nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
+	if (nla)
+		return (void *) nla - (void *) skb->data;
+
+	return 0;
+}
+
+static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+	struct nlattr *nla;
+
+	if (skb_is_nonlinear(skb))
+		return 0;
+
+	if (A > skb->len - sizeof(struct nlattr))
+		return 0;
+
+	nla = (struct nlattr *) &skb->data[A];
+	if (nla->nla_len > A - skb->len)
+		return 0;
+
+	nla = nla_find_nested(nla, X);
+	if (nla)
+		return (void *) nla - (void *) skb->data;
+
+	return 0;
+}
+
+static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+	return raw_smp_processor_id();
+}
+
+/* Register mappings for user programs. */
+#define A_REG		0
+#define X_REG		7
+#define TMP_REG		8
+#define ARG2_REG	2
+#define ARG3_REG	3
+
+static bool convert_bpf_extensions(struct sock_filter *fp,
+				   struct sock_filter_int **insnp)
+{
+	struct sock_filter_int *insn = *insnp;
+
+	switch (fp->k) {
+	case SKF_AD_OFF + SKF_AD_PROTOCOL:
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+
+		insn->code = BPF_LDX | BPF_MEM | BPF_H;
+		insn->a_reg = A_REG;
+		insn->x_reg = CTX_REG;
+		insn->off = offsetof(struct sk_buff, protocol);
+		insn++;
+
+		/* A = ntohs(A) [emitting a nop or swap16] */
+		insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
+		insn->a_reg = A_REG;
+		insn->imm = 16;
+		break;
+
+	case SKF_AD_OFF + SKF_AD_PKTTYPE:
+		insn->code = BPF_LDX | BPF_MEM | BPF_B;
+		insn->a_reg = A_REG;
+		insn->x_reg = CTX_REG;
+		insn->off = pkt_type_offset();
+		if (insn->off < 0)
+			return false;
+		insn++;
+
+		insn->code = BPF_ALU | BPF_AND | BPF_K;
+		insn->a_reg = A_REG;
+		insn->imm = PKT_TYPE_MAX;
+		break;
+
+	case SKF_AD_OFF + SKF_AD_IFINDEX:
+	case SKF_AD_OFF + SKF_AD_HATYPE:
+		if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
+			insn->code = BPF_LDX | BPF_MEM | BPF_DW;
+		else
+			insn->code = BPF_LDX | BPF_MEM | BPF_W;
+		insn->a_reg = TMP_REG;
+		insn->x_reg = CTX_REG;
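The convert_bpf_extensions() switch that this excerpt ends in maps classic BPF's ancillary loads (the SKF_AD_OFF + SKF_AD_* "magic" offsets) onto plain loads in the new internal format, as the SKF_AD_PROTOCOL case above shows. On the user side such an ancillary load looks like the fragment below; a small classic-BPF sketch using the documented SKF_AD_PROTOCOL extension from the uapi header, which sk_convert_filter() would then rewrite through convert_bpf_extensions():

/* Classic BPF fragment using an ancillary load: A = skb->protocol.
 * The converter turns this one instruction into the LDX_MEM plus
 * BPF_END pair shown in the SKF_AD_PROTOCOL case above.
 */
#include <linux/filter.h>

static const struct sock_filter proto_load[] = {
	/* A = skb->protocol (returned in host byte order) */
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
	/* return A: pass the packet iff protocol != 0 */
	BPF_STMT(BPF_RET | BPF_A, 0),
};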