Diffstat (limited to 'include/net')
-rw-r--r--  include/net/6lowpan.h | 435
-rw-r--r--  include/net/9p/9p.h | 281
-rw-r--r--  include/net/9p/client.h | 53
-rw-r--r--  include/net/9p/transport.h | 14
-rw-r--r--  include/net/Space.h | 31
-rw-r--r--  include/net/act_api.h | 118
-rw-r--r--  include/net/addrconf.h | 264
-rw-r--r--  include/net/af_ieee802154.h | 14
-rw-r--r--  include/net/af_rxrpc.h | 38
-rw-r--r--  include/net/af_unix.h | 38
-rw-r--r--  include/net/af_vsock.h | 179
-rw-r--r--  include/net/ah.h | 2
-rw-r--r--  include/net/arp.h | 71
-rw-r--r--  include/net/atmclip.h | 9
-rw-r--r--  include/net/ax25.h | 233
-rw-r--r--  include/net/bluetooth/bluetooth.h | 236
-rw-r--r--  include/net/bluetooth/hci.h | 912
-rw-r--r--  include/net/bluetooth/hci_core.h | 1163
-rw-r--r--  include/net/bluetooth/hci_mon.h | 51
-rw-r--r--  include/net/bluetooth/l2cap.h | 799
-rw-r--r--  include/net/bluetooth/mgmt.h | 580
-rw-r--r--  include/net/bluetooth/rfcomm.h | 56
-rw-r--r--  include/net/bluetooth/sco.h | 20
-rw-r--r--  include/net/busy_poll.h | 169
-rw-r--r--  include/net/caif/caif_dev.h | 67
-rw-r--r--  include/net/caif/caif_device.h | 2
-rw-r--r--  include/net/caif/caif_hsi.h | 200
-rw-r--r--  include/net/caif/caif_layer.h | 42
-rw-r--r--  include/net/caif/caif_shm.h | 26
-rw-r--r--  include/net/caif/caif_spi.h | 4
-rw-r--r--  include/net/caif/cfcnfg.h | 94
-rw-r--r--  include/net/caif/cfctrl.h | 17
-rw-r--r--  include/net/caif/cffrml.h | 9
-rw-r--r--  include/net/caif/cfmuxl.h | 4
-rw-r--r--  include/net/caif/cfpkt.h | 87
-rw-r--r--  include/net/caif/cfserl.h | 6
-rw-r--r--  include/net/caif/cfsrvl.h | 34
-rw-r--r--  include/net/cfg80211-wext.h | 55
-rw-r--r--  include/net/cfg80211.h | 3356
-rw-r--r--  include/net/checksum.h | 43
-rw-r--r--  include/net/cipso_ipv4.h | 38
-rw-r--r--  include/net/cls_cgroup.h | 43
-rw-r--r--  include/net/codel.h | 355
-rw-r--r--  include/net/compat.h | 46
-rw-r--r--  include/net/datalink.h | 2
-rw-r--r--  include/net/dcbevent.h | 48
-rw-r--r--  include/net/dcbnl.h | 56
-rw-r--r--  include/net/dn.h | 29
-rw-r--r--  include/net/dn_dev.h | 32
-rw-r--r--  include/net/dn_fib.h | 79
-rw-r--r--  include/net/dn_neigh.h | 12
-rw-r--r--  include/net/dn_nsp.h | 49
-rw-r--r--  include/net/dn_route.h | 27
-rw-r--r--  include/net/dsa.h | 149
-rw-r--r--  include/net/dsfield.h | 6
-rw-r--r--  include/net/dst.h | 311
-rw-r--r--  include/net/dst_ops.h | 14
-rw-r--r--  include/net/esp.h | 12
-rw-r--r--  include/net/ethoc.h | 1
-rw-r--r--  include/net/fib_rules.h | 32
-rw-r--r--  include/net/firewire.h | 25
-rw-r--r--  include/net/flow.h | 277
-rw-r--r--  include/net/flow_keys.h | 18
-rw-r--r--  include/net/flowcache.h | 25
-rw-r--r--  include/net/garp.h | 25
-rw-r--r--  include/net/gen_stats.h | 53
-rw-r--r--  include/net/genetlink.h | 224
-rw-r--r--  include/net/gre.h | 86
-rw-r--r--  include/net/gro_cells.h | 107
-rw-r--r--  include/net/icmp.h | 19
-rw-r--r--  include/net/ieee80211_radiotap.h | 82
-rw-r--r--  include/net/ieee802154.h | 43
-rw-r--r--  include/net/ieee802154_netdev.h | 406
-rw-r--r--  include/net/if_inet6.h | 116
-rw-r--r--  include/net/inet6_connection_sock.h | 31
-rw-r--r--  include/net/inet6_hashtables.h | 85
-rw-r--r--  include/net/inet_common.h | 46
-rw-r--r--  include/net/inet_connection_sock.h | 87
-rw-r--r--  include/net/inet_ecn.h | 106
-rw-r--r--  include/net/inet_frag.h | 133
-rw-r--r--  include/net/inet_hashtables.h | 140
-rw-r--r--  include/net/inet_sock.h | 121
-rw-r--r--  include/net/inet_timewait_sock.h | 113
-rw-r--r--  include/net/inetpeer.h | 152
-rw-r--r--  include/net/ip.h | 362
-rw-r--r--  include/net/ip6_checksum.h | 102
-rw-r--r--  include/net/ip6_fib.h | 153
-rw-r--r--  include/net/ip6_route.h | 157
-rw-r--r--  include/net/ip6_tunnel.h | 64
-rw-r--r--  include/net/ip_fib.h | 190
-rw-r--r--  include/net/ip_tunnels.h | 184
-rw-r--r--  include/net/ip_vs.h | 1041
-rw-r--r--  include/net/ipip.h | 67
-rw-r--r--  include/net/ipv6.h | 647
-rw-r--r--  include/net/ipx.h | 25
-rw-r--r--  include/net/irda/discovery.h | 4
-rw-r--r--  include/net/irda/ircomm_core.h | 4
-rw-r--r--  include/net/irda/ircomm_event.h | 4
-rw-r--r--  include/net/irda/ircomm_lmp.h | 4
-rw-r--r--  include/net/irda/ircomm_param.h | 4
-rw-r--r--  include/net/irda/ircomm_ttp.h | 4
-rw-r--r--  include/net/irda/ircomm_tty.h | 35
-rw-r--r--  include/net/irda/ircomm_tty_attach.h | 4
-rw-r--r--  include/net/irda/irda.h | 21
-rw-r--r--  include/net/irda/irda_device.h | 6
-rw-r--r--  include/net/irda/irlan_common.h | 3
-rw-r--r--  include/net/irda/irlap.h | 2
-rw-r--r--  include/net/irda/irlap_event.h | 6
-rw-r--r--  include/net/irda/irlap_frame.h | 8
-rw-r--r--  include/net/irda/irlmp.h | 7
-rw-r--r--  include/net/irda/irttp.h | 2
-rw-r--r--  include/net/irda/parameters.h | 4
-rw-r--r--  include/net/irda/qos.h | 4
-rw-r--r--  include/net/irda/wrapper.h | 2
-rw-r--r--  include/net/iucv/af_iucv.h | 65
-rw-r--r--  include/net/iucv/iucv.h | 38
-rw-r--r--  include/net/iw_handler.h | 42
-rw-r--r--  include/net/lapb.h | 60
-rw-r--r--  include/net/lib80211.h | 12
-rw-r--r--  include/net/llc.h | 54
-rw-r--r--  include/net/llc_c_ac.h | 190
-rw-r--r--  include/net/llc_c_ev.h | 209
-rw-r--r--  include/net/llc_conn.h | 36
-rw-r--r--  include/net/llc_if.h | 37
-rw-r--r--  include/net/llc_pdu.h | 52
-rw-r--r--  include/net/llc_s_ac.h | 20
-rw-r--r--  include/net/llc_s_ev.h | 21
-rw-r--r--  include/net/llc_sap.h | 22
-rw-r--r--  include/net/mac80211.h | 2565
-rw-r--r--  include/net/mac802154.h | 179
-rw-r--r--  include/net/mip6.h | 3
-rw-r--r--  include/net/mld.h | 51
-rw-r--r--  include/net/mrp.h | 142
-rw-r--r--  include/net/ndisc.h | 185
-rw-r--r--  include/net/neighbour.h | 243
-rw-r--r--  include/net/net_namespace.h | 170
-rw-r--r--  include/net/net_ratelimit.h | 8
-rw-r--r--  include/net/netevent.h | 12
-rw-r--r--  include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 6
-rw-r--r--  include/net/netfilter/ipv4/nf_defrag_ipv4.h | 2
-rw-r--r--  include/net/netfilter/ipv4/nf_reject.h | 128
-rw-r--r--  include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 10
-rw-r--r--  include/net/netfilter/ipv6/nf_defrag_ipv6.h | 9
-rw-r--r--  include/net/netfilter/ipv6/nf_reject.h | 171
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 137
-rw-r--r--  include/net/netfilter/nf_conntrack_acct.h | 24
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 68
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 88
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 18
-rw-r--r--  include/net/netfilter/nf_conntrack_extend.h | 43
-rw-r--r--  include/net/netfilter/nf_conntrack_helper.h | 79
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 25
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 79
-rw-r--r--  include/net/netfilter/nf_conntrack_labels.h | 60
-rw-r--r--  include/net/netfilter/nf_conntrack_seqadj.h | 47
-rw-r--r--  include/net/netfilter/nf_conntrack_synproxy.h | 75
-rw-r--r--  include/net/netfilter/nf_conntrack_timeout.h | 98
-rw-r--r--  include/net/netfilter/nf_conntrack_timestamp.h | 78
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h | 30
-rw-r--r--  include/net/netfilter/nf_log.h | 18
-rw-r--r--  include/net/netfilter/nf_nat.h | 86
-rw-r--r--  include/net/netfilter/nf_nat_core.h | 17
-rw-r--r--  include/net/netfilter/nf_nat_helper.h | 43
-rw-r--r--  include/net/netfilter/nf_nat_l3proto.h | 49
-rw-r--r--  include/net/netfilter/nf_nat_l4proto.h | 72
-rw-r--r--  include/net/netfilter/nf_nat_protocol.h | 74
-rw-r--r--  include/net/netfilter/nf_nat_rule.h | 15
-rw-r--r--  include/net/netfilter/nf_queue.h | 78
-rw-r--r--  include/net/netfilter/nf_tables.h | 655
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 52
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h | 26
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h | 33
-rw-r--r--  include/net/netfilter/nf_tproxy_core.h | 218
-rw-r--r--  include/net/netfilter/nfnetlink_log.h | 3
-rw-r--r--  include/net/netfilter/nfnetlink_queue.h | 51
-rw-r--r--  include/net/netfilter/nft_meta.h | 36
-rw-r--r--  include/net/netfilter/nft_reject.h | 25
-rw-r--r--  include/net/netfilter/xt_log.h | 4
-rw-r--r--  include/net/netfilter/xt_rateest.h | 6
-rw-r--r--  include/net/netlabel.h | 11
-rw-r--r--  include/net/netlink.h | 417
-rw-r--r--  include/net/netns/conntrack.h | 107
-rw-r--r--  include/net/netns/generic.h | 2
-rw-r--r--  include/net/netns/hash.h | 2
-rw-r--r--  include/net/netns/ieee802154_6lowpan.h | 22
-rw-r--r--  include/net/netns/ipv4.h | 48
-rw-r--r--  include/net/netns/ipv6.h | 20
-rw-r--r--  include/net/netns/mib.h | 6
-rw-r--r--  include/net/netns/netfilter.h | 18
-rw-r--r--  include/net/netns/nftables.h | 20
-rw-r--r--  include/net/netns/packet.h | 4
-rw-r--r--  include/net/netns/sctp.h | 134
-rw-r--r--  include/net/netns/x_tables.h | 7
-rw-r--r--  include/net/netns/xfrm.h | 17
-rw-r--r--  include/net/netprio_cgroup.h | 50
-rw-r--r--  include/net/netrom.h | 105
-rw-r--r--  include/net/nfc/digital.h | 248
-rw-r--r--  include/net/nfc/hci.h | 254
-rw-r--r--  include/net/nfc/llc.h | 52
-rw-r--r--  include/net/nfc/nci.h | 396
-rw-r--r--  include/net/nfc/nci_core.h | 232
-rw-r--r--  include/net/nfc/nfc.h | 270
-rw-r--r--  include/net/nl802154.h | 6
-rw-r--r--  include/net/p8022.h | 18
-rw-r--r--  include/net/phonet/gprs.h | 2
-rw-r--r--  include/net/phonet/pep.h | 23
-rw-r--r--  include/net/phonet/phonet.h | 5
-rw-r--r--  include/net/phonet/pn_dev.h | 2
-rw-r--r--  include/net/ping.h | 111
-rw-r--r--  include/net/pkt_cls.h | 100
-rw-r--r--  include/net/pkt_sched.h | 66
-rw-r--r--  include/net/protocol.h | 78
-rw-r--r--  include/net/psnap.h | 4
-rw-r--r--  include/net/raw.h | 6
-rw-r--r--  include/net/rawv6.h | 9
-rw-r--r--  include/net/red.h | 204
-rw-r--r--  include/net/regulatory.h | 138
-rw-r--r--  include/net/request_sock.h | 83
-rw-r--r--  include/net/rose.h | 120
-rw-r--r--  include/net/route.h | 296
-rw-r--r--  include/net/rtnetlink.h | 69
-rw-r--r--  include/net/sch_generic.h | 255
-rw-r--r--  include/net/scm.h | 53
-rw-r--r--  include/net/sctp/auth.h | 13
-rw-r--r--  include/net/sctp/checksum.h | 67
-rw-r--r--  include/net/sctp/command.h | 65
-rw-r--r--  include/net/sctp/constants.h | 43
-rw-r--r--  include/net/sctp/sctp.h | 273
-rw-r--r--  include/net/sctp/sm.h | 28
-rw-r--r--  include/net/sctp/structs.h | 376
-rw-r--r--  include/net/sctp/tsnmap.h | 16
-rw-r--r--  include/net/sctp/ulpevent.h | 18
-rw-r--r--  include/net/sctp/ulpqueue.h | 16
-rw-r--r--  include/net/sctp/user.h | 711
-rw-r--r--  include/net/secure_seq.h | 18
-rw-r--r--  include/net/snmp.h | 114
-rw-r--r--  include/net/sock.h | 1132
-rw-r--r--  include/net/stp.h | 4
-rw-r--r--  include/net/tc_act/tc_csum.h | 4
-rw-r--r--  include/net/tc_act/tc_defact.h | 4
-rw-r--r--  include/net/tc_act/tc_gact.h | 4
-rw-r--r--  include/net/tc_act/tc_ipt.h | 4
-rw-r--r--  include/net/tc_act/tc_mirred.h | 4
-rw-r--r--  include/net/tc_act/tc_nat.h | 4
-rw-r--r--  include/net/tc_act/tc_pedit.h | 4
-rw-r--r--  include/net/tc_act/tc_skbedit.h | 7
-rw-r--r--  include/net/tcp.h | 942
-rw-r--r--  include/net/tcp_memcontrol.h | 7
-rw-r--r--  include/net/timewait_sock.h | 9
-rw-r--r--  include/net/transp_v6.h | 89
-rw-r--r--  include/net/tso.h | 20
-rw-r--r--  include/net/udp.h | 153
-rw-r--r--  include/net/udplite.h | 77
-rw-r--r--  include/net/vsock_addr.h | 30
-rw-r--r--  include/net/vxlan.h | 62
-rw-r--r--  include/net/wext.h | 16
-rw-r--r--  include/net/wimax.h | 49
-rw-r--r--  include/net/wpan-phy.h | 30
-rw-r--r--  include/net/x25.h | 143
-rw-r--r--  include/net/xfrm.h | 819
260 files changed, 24561 insertions, 8949 deletions
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
new file mode 100644
index 00000000000..79b530fb2c4
--- /dev/null
+++ b/include/net/6lowpan.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __6LOWPAN_H__
+#define __6LOWPAN_H__
+
+#include <net/ipv6.h>
+#include <net/net_namespace.h>
+
+#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
+#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
+#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
+#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
+
+/*
+ * ipv6 address based on mac
+ * second bit-flip (Universal/Local) is done according to RFC 2464
+ */
+#define is_addr_mac_addr_based(a, m) \
+ ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
+ (((a)->s6_addr[9]) == (m)[1]) && \
+ (((a)->s6_addr[10]) == (m)[2]) && \
+ (((a)->s6_addr[11]) == (m)[3]) && \
+ (((a)->s6_addr[12]) == (m)[4]) && \
+ (((a)->s6_addr[13]) == (m)[5]) && \
+ (((a)->s6_addr[14]) == (m)[6]) && \
+ (((a)->s6_addr[15]) == (m)[7]))
+
+/* ipv6 address is unspecified */
+#define is_addr_unspecified(a) \
+ ((((a)->s6_addr32[0]) == 0) && \
+ (((a)->s6_addr32[1]) == 0) && \
+ (((a)->s6_addr32[2]) == 0) && \
+ (((a)->s6_addr32[3]) == 0))
+
+/* compare ipv6 addresses prefixes */
+#define ipaddr_prefixcmp(addr1, addr2, length) \
+ (memcmp(addr1, addr2, length >> 3) == 0)
+
+/* local link, i.e. FE80::/10 */
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
+
+/*
+ * check whether we can compress the IID to 16 bits;
+ * this is possible only for unicast addresses whose first 49 bits are zero.
+ */
+#define lowpan_is_iid_16_bit_compressable(a) \
+ ((((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr[10]) == 0) && \
+ (((a)->s6_addr[11]) == 0xff) && \
+ (((a)->s6_addr[12]) == 0xfe) && \
+ (((a)->s6_addr[13]) == 0))
+
+/* multicast address */
+#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
+
+/* check whether the 112-bit gid of the multicast address is mappable to: */
+
+/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
+#define lowpan_is_mcast_addr_compressable(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ (((a)->s6_addr[14]) == 0) && \
+ ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
+
+/* 48 bits, FFXX::00XX:XXXX:XXXX */
+#define lowpan_is_mcast_addr_compressable48(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr[10]) == 0))
+
+/* 32 bits, FFXX::00XX:XXXX */
+#define lowpan_is_mcast_addr_compressable32(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr[12]) == 0))
+
+/* 8 bits, FF02::00XX */
+#define lowpan_is_mcast_addr_compressable8(a) \
+ ((((a)->s6_addr[1]) == 2) && \
+ (((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ (((a)->s6_addr[14]) == 0))
+
+#define lowpan_is_addr_broadcast(a) \
+ ((((a)[0]) == 0xFF) && \
+ (((a)[1]) == 0xFF) && \
+ (((a)[2]) == 0xFF) && \
+ (((a)[3]) == 0xFF) && \
+ (((a)[4]) == 0xFF) && \
+ (((a)[5]) == 0xFF) && \
+ (((a)[6]) == 0xFF) && \
+ (((a)[7]) == 0xFF))
+
+#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */
+#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
+#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
+
+#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
+/*
+ * According to the IEEE 802.15.4 standard:
+ * - MTU is 127 octets
+ * - maximum MHR size is 37 octets
+ * - MFR size is 2 octets
+ *
+ * so the minimal payload size that we may guarantee is:
+ * MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE 88
+
+/*
+ * Values of fields within the IPHC encoding first byte
+ * (C stands for compressed and I for inline)
+ */
+#define LOWPAN_IPHC_TF 0x18
+
+#define LOWPAN_IPHC_FL_C 0x10
+#define LOWPAN_IPHC_TC_C 0x08
+#define LOWPAN_IPHC_NH_C 0x04
+#define LOWPAN_IPHC_TTL_1 0x01
+#define LOWPAN_IPHC_TTL_64 0x02
+#define LOWPAN_IPHC_TTL_255 0x03
+#define LOWPAN_IPHC_TTL_I 0x00
+
+
+/* Values of fields within the IPHC encoding second byte */
+#define LOWPAN_IPHC_CID 0x80
+
+#define LOWPAN_IPHC_ADDR_00 0x00
+#define LOWPAN_IPHC_ADDR_01 0x01
+#define LOWPAN_IPHC_ADDR_02 0x02
+#define LOWPAN_IPHC_ADDR_03 0x03
+
+#define LOWPAN_IPHC_SAC 0x40
+#define LOWPAN_IPHC_SAM 0x30
+
+#define LOWPAN_IPHC_SAM_BIT 4
+
+#define LOWPAN_IPHC_M 0x08
+#define LOWPAN_IPHC_DAC 0x04
+#define LOWPAN_IPHC_DAM_00 0x00
+#define LOWPAN_IPHC_DAM_01 0x01
+#define LOWPAN_IPHC_DAM_10 0x02
+#define LOWPAN_IPHC_DAM_11 0x03
+
+#define LOWPAN_IPHC_DAM_BIT 0
+/*
+ * LOWPAN_UDP encoding (works together with IPHC)
+ */
+#define LOWPAN_NHC_UDP_MASK 0xF8
+#define LOWPAN_NHC_UDP_ID 0xF0
+#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
+#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
+
+#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
+
+/* values for port compression, _with checksum_ ie bit 5 set to 0 */
+#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
+#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
+ dest = 0xF0 + 8 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
+ dest = 16 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
+
+#ifdef DEBUG
+/* print data in line */
+static inline void raw_dump_inline(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+ if (msg)
+ pr_debug("%s():%s: ", caller, msg);
+
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false);
+}
+
+/* print data in a table format:
+ *
+ * addr: xx xx xx xx xx xx
+ * addr: xx xx xx xx xx xx
+ * ...
+ */
+static inline void raw_dump_table(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+ if (msg)
+ pr_debug("%s():%s:\n", caller, msg);
+
+ print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
+}
+#else
+static inline void raw_dump_table(const char *caller, char *msg,
+ unsigned char *buf, int len) { }
+static inline void raw_dump_inline(const char *caller, char *msg,
+ unsigned char *buf, int len) { }
+#endif
+
+static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 1)))
+ return -EINVAL;
+
+ *val = skb->data[0];
+ skb_pull(skb, 1);
+
+ return 0;
+}
+
+static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ return -EINVAL;
+
+ *val = (skb->data[0] << 8) | skb->data[1];
+ skb_pull(skb, 2);
+
+ return 0;
+}
+
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+ void *data, const unsigned int len)
+{
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return true;
+
+ skb_copy_from_linear_data(skb, data, len);
+ skb_pull(skb, len);
+
+ return false;
+}
+
+static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
+ const size_t len)
+{
+ memcpy(*hc_ptr, data, len);
+ *hc_ptr += len;
+}
+
+static inline u8 lowpan_addr_mode_size(const u8 addr_mode)
+{
+ static const u8 addr_sizes[] = {
+ [LOWPAN_IPHC_ADDR_00] = 16,
+ [LOWPAN_IPHC_ADDR_01] = 8,
+ [LOWPAN_IPHC_ADDR_02] = 2,
+ [LOWPAN_IPHC_ADDR_03] = 0,
+ };
+ return addr_sizes[addr_mode];
+}
+
+static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header)
+{
+ u8 ret = 1;
+
+ if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+ *uncomp_header += sizeof(struct udphdr);
+
+ switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) {
+ case LOWPAN_NHC_UDP_CS_P_00:
+ ret += 4;
+ break;
+ case LOWPAN_NHC_UDP_CS_P_01:
+ case LOWPAN_NHC_UDP_CS_P_10:
+ ret += 3;
+ break;
+ case LOWPAN_NHC_UDP_CS_P_11:
+ ret++;
+ break;
+ default:
+ break;
+ }
+
+ if (!(h_enc & LOWPAN_NHC_UDP_CS_C))
+ ret += 2;
+ }
+
+ return ret;
+}
+
+/**
+ * lowpan_uncompress_size - returns skb->len size with uncompressed header
+ * @skb: sk_buff with 6lowpan header inside
+ * @datagram_offset: optional to get the datagram_offset value
+ *
+ * Returns the skb->len with uncompressed header
+ */
+static inline u16
+lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
+{
+ u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr);
+ u8 iphc0, iphc1, h_enc;
+
+ iphc0 = skb_network_header(skb)[0];
+ iphc1 = skb_network_header(skb)[1];
+
+ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
+ case 0:
+ ret += 4;
+ break;
+ case 1:
+ ret += 3;
+ break;
+ case 2:
+ ret++;
+ break;
+ default:
+ break;
+ }
+
+ if (!(iphc0 & LOWPAN_IPHC_NH_C))
+ ret++;
+
+ if (!(iphc0 & 0x03))
+ ret++;
+
+ ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >>
+ LOWPAN_IPHC_SAM_BIT);
+
+ if (iphc1 & LOWPAN_IPHC_M) {
+ switch ((iphc1 & LOWPAN_IPHC_DAM_11) >>
+ LOWPAN_IPHC_DAM_BIT) {
+ case LOWPAN_IPHC_DAM_00:
+ ret += 16;
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ ret += 6;
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ ret += 4;
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ ret++;
+ break;
+ default:
+ break;
+ }
+ } else {
+ ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >>
+ LOWPAN_IPHC_DAM_BIT);
+ }
+
+ if (iphc0 & LOWPAN_IPHC_NH_C) {
+ h_enc = skb_network_header(skb)[ret];
+ ret += lowpan_next_hdr_size(h_enc, &uncomp_header);
+ }
+
+ if (dgram_offset)
+ *dgram_offset = uncomp_header;
+
+ return skb->len + uncomp_header - ret;
+}
+
+typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
+
+int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
+ const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
+ const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
+ u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
+int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len);
+
+#endif /* __6LOWPAN_H__ */
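
As a reading aid (not part of the patch): is_addr_mac_addr_based() above tests whether the low 64 bits of an IPv6 address are simply the 8-byte link-layer address with its Universal/Local bit flipped. A small standalone sketch of that mapping, with a made-up sample MAC value:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Build the interface identifier that is_addr_mac_addr_based()
     * expects in s6_addr[8..15]: the 8-byte link-layer address with
     * the Universal/Local bit of the first octet flipped (RFC 2464).
     */
    static void mac_to_iid(const uint8_t mac[8], uint8_t iid[8])
    {
            memcpy(iid, mac, 8);
            iid[0] ^= 0x02;                 /* flip the U/L bit */
    }

    int main(void)
    {
            const uint8_t mac[8] = { 0x00, 0x12, 0x4b, 0x00, 0x01, 0x02, 0x03, 0x04 };
            uint8_t iid[8];
            int i;

            mac_to_iid(mac, iid);
            for (i = 0; i < 8; i++)
                    printf("%02x", iid[i]); /* prints 02124b0001020304 */
            printf("\n");
            return 0;
    }
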
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 071fd7a8d78..27dfe85772b 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -40,6 +40,7 @@
* @P9_DEBUG_FID: fid allocation/deallocation tracking
* @P9_DEBUG_PKT: packet marshalling/unmarshalling
* @P9_DEBUG_FSC: FS-cache tracing
+ * @P9_DEBUG_VPKT: Verbose packet debugging (full packet dump)
*
* These flags are passed at mount time to turn on various levels of
* verbosity and tracing which will be output to the system logs.
@@ -57,33 +58,21 @@ enum p9_debug_flags {
P9_DEBUG_FID = (1<<9),
P9_DEBUG_PKT = (1<<10),
P9_DEBUG_FSC = (1<<11),
+ P9_DEBUG_VPKT = (1<<12),
};
#ifdef CONFIG_NET_9P_DEBUG
extern unsigned int p9_debug_level;
-
-#define P9_DPRINTK(level, format, arg...) \
-do { \
- if ((p9_debug_level & level) == level) {\
- if (level == P9_DEBUG_9P) \
- printk(KERN_NOTICE "(%8.8d) " \
- format , task_pid_nr(current) , ## arg); \
- else \
- printk(KERN_NOTICE "-- %s (%d): " \
- format , __func__, task_pid_nr(current) , ## arg); \
- } \
-} while (0)
-
+__printf(3, 4)
+void _p9_debug(enum p9_debug_flags level, const char *func,
+ const char *fmt, ...);
+#define p9_debug(level, fmt, ...) \
+ _p9_debug(level, __func__, fmt, ##__VA_ARGS__)
#else
-#define P9_DPRINTK(level, format, arg...) do { } while (0)
+#define p9_debug(level, fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#endif
-#define P9_EPRINTK(level, format, arg...) \
-do { \
- printk(level "9p: %s (%d): " \
- format , __func__, task_pid_nr(current), ## arg); \
-} while (0)
-
/**
* enum p9_msg_t - 9P message types
* @P9_TLERROR: not used
@@ -119,7 +108,7 @@ do { \
* @P9_TREAD: request to transfer data from a file or directory
* @P9_RREAD: response with data requested
* @P9_TWRITE: reuqest to transfer data to a file
- * @P9_RWRITE: response with out much data was transfered to file
+ * @P9_RWRITE: response with how much data was transferred to file
* @P9_TCLUNK: forget about a handle to an entity within the file system
* @P9_RCLUNK: response when server has forgotten about the handle
* @P9_TREMOVE: request to remove an entity from the hierarchy
@@ -175,6 +164,10 @@ enum p9_msg_t {
P9_RLINK,
P9_TMKDIR = 72,
P9_RMKDIR,
+ P9_TRENAMEAT = 74,
+ P9_RRENAMEAT,
+ P9_TUNLINKAT = 76,
+ P9_RUNLINKAT,
P9_TVERSION = 100,
P9_RVERSION,
P9_TAUTH = 102,
@@ -241,10 +234,10 @@ enum p9_open_mode_t {
/**
* enum p9_perm_t - 9P permissions
- * @P9_DMDIR: mode bite for directories
+ * @P9_DMDIR: mode bit for directories
* @P9_DMAPPEND: mode bit for is append-only
* @P9_DMEXCL: mode bit for excluse use (only one open handle allowed)
- * @P9_DMMOUNT: mode bite for mount points
+ * @P9_DMMOUNT: mode bit for mount points
* @P9_DMAUTH: mode bit for authentication file
* @P9_DMTMP: mode bit for non-backed-up files
* @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u)
@@ -278,6 +271,35 @@ enum p9_perm_t {
P9_DMSETVTX = 0x00010000,
};
+/* 9p2000.L open flags */
+#define P9_DOTL_RDONLY 00000000
+#define P9_DOTL_WRONLY 00000001
+#define P9_DOTL_RDWR 00000002
+#define P9_DOTL_NOACCESS 00000003
+#define P9_DOTL_CREATE 00000100
+#define P9_DOTL_EXCL 00000200
+#define P9_DOTL_NOCTTY 00000400
+#define P9_DOTL_TRUNC 00001000
+#define P9_DOTL_APPEND 00002000
+#define P9_DOTL_NONBLOCK 00004000
+#define P9_DOTL_DSYNC 00010000
+#define P9_DOTL_FASYNC 00020000
+#define P9_DOTL_DIRECT 00040000
+#define P9_DOTL_LARGEFILE 00100000
+#define P9_DOTL_DIRECTORY 00200000
+#define P9_DOTL_NOFOLLOW 00400000
+#define P9_DOTL_NOATIME 01000000
+#define P9_DOTL_CLOEXEC 02000000
+#define P9_DOTL_SYNC 04000000
+
+/* 9p2000.L at flags */
+#define P9_DOTL_AT_REMOVEDIR 0x200
+
+/* 9p2000.L lock type */
+#define P9_LOCK_TYPE_RDLCK 0
+#define P9_LOCK_TYPE_WRLCK 1
+#define P9_LOCK_TYPE_UNLCK 2
+
/**
* enum p9_qid_t - QID types
* @P9_QTDIR: directory
@@ -292,7 +314,7 @@ enum p9_perm_t {
*
* QID types are a subset of permissions - they are primarily
* used to differentiate semantics for a file system entity via
- * a jump-table. Their value is also the most signifigant 16 bits
+ * a jump-table. Their value is also the most significant 16 bits
* of the permission_t
*
* See Also: http://plan9.bell-labs.com/magic/man2html/2/stat
@@ -320,20 +342,8 @@ enum p9_qid_t {
/* Room for readdir header */
#define P9_READDIRHDRSZ 24
-/**
- * struct p9_str - length prefixed string type
- * @len: length of the string
- * @str: the string
- *
- * The protocol uses length prefixed strings for all
- * string data, so we replicate that for our internal
- * string members.
- */
-
-struct p9_str {
- u16 len;
- char *str;
-};
+/* size of header for zero copy read/write */
+#define P9_ZC_HDR_SZ 4096
/**
* struct p9_qid - file system entity information
@@ -362,20 +372,20 @@ struct p9_qid {
};
/**
- * struct p9_stat - file system metadata information
+ * struct p9_wstat - file system metadata information
* @size: length prefix for this stat structure instance
- * @type: the type of the server (equivilent to a major number)
- * @dev: the sub-type of the server (equivilent to a minor number)
+ * @type: the type of the server (equivalent to a major number)
+ * @dev: the sub-type of the server (equivalent to a minor number)
* @qid: unique id from the server of type &p9_qid
* @mode: Plan 9 format permissions of type &p9_perm_t
* @atime: Last access/read time
* @mtime: Last modify/write time
* @length: file length
- * @name: last element of path (aka filename) in type &p9_str
- * @uid: owner name in type &p9_str
- * @gid: group owner in type &p9_str
- * @muid: last modifier in type &p9_str
- * @extension: area used to encode extended UNIX support in type &p9_str
+ * @name: last element of path (aka filename)
+ * @uid: owner name
+ * @gid: group owner
+ * @muid: last modifier
+ * @extension: area used to encode extended UNIX support
* @n_uid: numeric user id of owner (part of 9p2000.u extension)
* @n_gid: numeric group id (part of 9p2000.u extension)
* @n_muid: numeric user id of laster modifier (part of 9p2000.u extension)
@@ -397,17 +407,17 @@ struct p9_wstat {
char *gid;
char *muid;
char *extension; /* 9p2000.u extensions */
- u32 n_uid; /* 9p2000.u extensions */
- u32 n_gid; /* 9p2000.u extensions */
- u32 n_muid; /* 9p2000.u extensions */
+ kuid_t n_uid; /* 9p2000.u extensions */
+ kgid_t n_gid; /* 9p2000.u extensions */
+ kuid_t n_muid; /* 9p2000.u extensions */
};
struct p9_stat_dotl {
u64 st_result_mask;
struct p9_qid qid;
u32 st_mode;
- u32 st_uid;
- u32 st_gid;
+ kuid_t st_uid;
+ kgid_t st_gid;
u64 st_nlink;
u64 st_rdev;
u64 st_size;
@@ -461,8 +471,8 @@ struct p9_stat_dotl {
struct p9_iattr_dotl {
u32 valid;
u32 mode;
- u32 uid;
- u32 gid;
+ kuid_t uid;
+ kgid_t gid;
u64 size;
u64 atime_sec;
u64 atime_nsec;
@@ -512,11 +522,6 @@ struct p9_getlock {
char *client_id;
};
-/* Structures for Protocol Operations */
-struct p9_tstatfs {
- u32 fid;
-};
-
struct p9_rstatfs {
u32 type;
u32 bsize;
@@ -529,166 +534,13 @@ struct p9_rstatfs {
u32 namelen;
};
-struct p9_trename {
- u32 fid;
- u32 newdirfid;
- struct p9_str name;
-};
-
-struct p9_rrename {
-};
-
-struct p9_tversion {
- u32 msize;
- struct p9_str version;
-};
-
-struct p9_rversion {
- u32 msize;
- struct p9_str version;
-};
-
-struct p9_tauth {
- u32 afid;
- struct p9_str uname;
- struct p9_str aname;
- u32 n_uname; /* 9P2000.u extensions */
-};
-
-struct p9_rauth {
- struct p9_qid qid;
-};
-
-struct p9_rerror {
- struct p9_str error;
- u32 errno; /* 9p2000.u extension */
-};
-
-struct p9_tflush {
- u16 oldtag;
-};
-
-struct p9_rflush {
-};
-
-struct p9_tattach {
- u32 fid;
- u32 afid;
- struct p9_str uname;
- struct p9_str aname;
- u32 n_uname; /* 9P2000.u extensions */
-};
-
-struct p9_rattach {
- struct p9_qid qid;
-};
-
-struct p9_twalk {
- u32 fid;
- u32 newfid;
- u16 nwname;
- struct p9_str wnames[16];
-};
-
-struct p9_rwalk {
- u16 nwqid;
- struct p9_qid wqids[16];
-};
-
-struct p9_topen {
- u32 fid;
- u8 mode;
-};
-
-struct p9_ropen {
- struct p9_qid qid;
- u32 iounit;
-};
-
-struct p9_tcreate {
- u32 fid;
- struct p9_str name;
- u32 perm;
- u8 mode;
- struct p9_str extension;
-};
-
-struct p9_rcreate {
- struct p9_qid qid;
- u32 iounit;
-};
-
-struct p9_tread {
- u32 fid;
- u64 offset;
- u32 count;
-};
-
-struct p9_rread {
- u32 count;
- u8 *data;
-};
-
-struct p9_twrite {
- u32 fid;
- u64 offset;
- u32 count;
- u8 *data;
-};
-
-struct p9_rwrite {
- u32 count;
-};
-
-struct p9_treaddir {
- u32 fid;
- u64 offset;
- u32 count;
-};
-
-struct p9_rreaddir {
- u32 count;
- u8 *data;
-};
-
-
-struct p9_tclunk {
- u32 fid;
-};
-
-struct p9_rclunk {
-};
-
-struct p9_tremove {
- u32 fid;
-};
-
-struct p9_rremove {
-};
-
-struct p9_tstat {
- u32 fid;
-};
-
-struct p9_rstat {
- struct p9_wstat stat;
-};
-
-struct p9_twstat {
- u32 fid;
- struct p9_wstat stat;
-};
-
-struct p9_rwstat {
-};
-
/**
* struct p9_fcall - primary packet structure
* @size: prefixed length of the structure
* @id: protocol operating identifier of type &p9_msg_t
* @tag: transaction id of the request
- * @offset: used by marshalling routines to track currentposition in buffer
- * @capacity: used by marshalling routines to track total capacity
+ * @offset: used by marshalling routines to track current position in buffer
+ * @capacity: used by marshalling routines to track total malloc'd capacity
* @sdata: payload
*
* &p9_fcall represents the structure for all 9P RPC
@@ -706,7 +558,7 @@ struct p9_fcall {
size_t offset;
size_t capacity;
- uint8_t *sdata;
+ u8 *sdata;
};
struct p9_idpool;
@@ -720,7 +572,6 @@ void p9_idpool_put(int id, struct p9_idpool *p);
int p9_idpool_check(int id, struct p9_idpool *p);
int p9_error_init(void);
-int p9_errstr2errno(char *, int);
int p9_trans_fd_init(void);
void p9_trans_fd_exit(void);
#endif /* NET_9P_H */
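
For orientation (an illustrative before/after, not taken from the patch itself; the TCLUNK message is a hypothetical call site): with the debug rework above, code that used the removed P9_DPRINTK macro is now written with p9_debug(), which passes __func__ to _p9_debug() automatically:

    /* old style, removed by this patch */
    P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);

    /* new style; the calling function's name no longer has to be
     * spelled out in the format string */
    p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
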
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 83ba6a4d58a..6fab66c5c5a 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -26,6 +26,8 @@
#ifndef NET_9P_CLIENT_H
#define NET_9P_CLIENT_H
+#include <linux/utsname.h>
+
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
@@ -36,9 +38,9 @@
*/
enum p9_proto_versions{
- p9_proto_legacy = 0,
- p9_proto_2000u = 1,
- p9_proto_2000L = 2,
+ p9_proto_legacy,
+ p9_proto_2000u,
+ p9_proto_2000L,
};
@@ -60,12 +62,11 @@ enum p9_trans_status {
};
/**
- * enum p9_req_status_t - virtio request status
+ * enum p9_req_status_t - status of a request
* @REQ_STATUS_IDLE: request slot unused
* @REQ_STATUS_ALLOC: request has been allocated but not sent
* @REQ_STATUS_UNSENT: request waiting to be sent
* @REQ_STATUS_SENT: request sent to server
- * @REQ_STATUS_FLSH: a flush has been sent for this request
* @REQ_STATUS_RCVD: response received from server
* @REQ_STATUS_FLSHD: request has been flushed
* @REQ_STATUS_ERROR: request encountered an error on the client side
@@ -81,7 +82,6 @@ enum p9_req_status_t {
REQ_STATUS_ALLOC,
REQ_STATUS_UNSENT,
REQ_STATUS_SENT,
- REQ_STATUS_FLSH,
REQ_STATUS_RCVD,
REQ_STATUS_FLSHD,
REQ_STATUS_ERROR,
@@ -101,7 +101,7 @@ enum p9_req_status_t {
* Transport use an array to track outstanding requests
* instead of a list. While this may incurr overhead during initial
* allocation or expansion, it makes request lookup much easier as the
- * tag id is a index into an array. (We use tag+1 so that we can accomodate
+ * tag id is an index into an array. (We use tag+1 so that we can accommodate
* the -1 tag for the T_VERSION request).
* This also has the nice effect of only having to allocate wait_queues
* once, instead of constantly allocating and freeing them. Its possible
@@ -128,12 +128,12 @@ struct p9_req_t {
* @proto_version: 9P protocol version to use
* @trans_mod: module API instantiated with this client
* @trans: tranport instance state and API
- * @conn: connection state information used by trans_fd
* @fidpool: fid handle accounting for session
* @fidlist: List of active fid handles
* @tagpool - transaction id accounting for session
* @reqs - 2D array of requests
* @max_tag - current maximum tag id allocated
+ * @name - node name used as client id
*
* The client structure is used to keep track of various per-client
* state that has been instantiated.
@@ -151,12 +151,11 @@ struct p9_req_t {
struct p9_client {
spinlock_t lock; /* protect client structure */
- int msize;
+ unsigned int msize;
unsigned char proto_version;
struct p9_trans_module *trans_mod;
enum p9_trans_status status;
void *trans;
- struct p9_conn *conn;
struct p9_idpool *fidpool;
struct list_head fidlist;
@@ -164,6 +163,8 @@ struct p9_client {
struct p9_idpool *tagpool;
struct p9_req_t *reqs[P9_ROW_MAXTAG];
int max_tag;
+
+ char name[__NEW_UTS_LEN + 1];
};
/**
@@ -187,12 +188,12 @@ struct p9_fid {
int mode;
struct p9_qid qid;
u32 iounit;
- uid_t uid;
+ kuid_t uid;
void *rdir;
struct list_head flist;
- struct list_head dlist; /* list of all fids attached to a dentry */
+ struct hlist_node dlist; /* list of all fids attached to a dentry */
};
/**
@@ -211,33 +212,37 @@ struct p9_dirent {
};
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
-int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name);
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
+ const char *name);
+int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
+ struct p9_fid *newdirfid, const char *new_name);
struct p9_client *p9_client_create(const char *dev_name, char *options);
void p9_client_destroy(struct p9_client *clnt);
void p9_client_disconnect(struct p9_client *clnt);
void p9_client_begin_disconnect(struct p9_client *clnt);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
- char *uname, u32 n_uname, char *aname);
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
- int clone);
+ char *uname, kuid_t n_uname, char *aname);
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+ char **wnames, int clone);
int p9_client_open(struct p9_fid *fid, int mode);
int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
char *extension);
int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname);
-int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, gid_t gid,
+int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, kgid_t gid,
struct p9_qid *qid);
int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
- gid_t gid, struct p9_qid *qid);
+ kgid_t gid, struct p9_qid *qid);
int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
+int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
u64 offset, u32 count);
int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version);
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent);
struct p9_wstat *p9_client_stat(struct p9_fid *fid);
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr);
@@ -246,16 +251,16 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
u64 request_mask);
int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode,
- dev_t rdev, gid_t gid, struct p9_qid *);
+ dev_t rdev, kgid_t gid, struct p9_qid *);
int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
- gid_t gid, struct p9_qid *);
+ kgid_t gid, struct p9_qid *);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
-void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(char *, int, struct p9_wstat *, int);
+int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
void p9stat_free(struct p9_wstat *);
int p9_is_proto_dotu(struct p9_client *clnt);
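
The uid_t/gid_t parameters above have become kuid_t/kgid_t, so callers (fs/9p in practice) must map raw numeric ids through the kernel's uidgid helpers before handing them to the client API. A hedged sketch, with a hypothetical wrapper name:

    #include <linux/cred.h>
    #include <linux/uidgid.h>
    #include <net/9p/client.h>

    /* Hypothetical wrapper: convert a numeric uid (e.g. from a mount
     * option) into the kuid_t now taken by p9_client_attach(). */
    static struct p9_fid *attach_as(struct p9_client *clnt, uid_t raw_uid,
                                    char *uname, char *aname)
    {
            kuid_t uid = make_kuid(current_user_ns(), raw_uid);

            return p9_client_attach(clnt, NULL, uname, uid, aname);
    }
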
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 6d5886efb10..d9fa68f26c4 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,21 +26,28 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
+#define P9_DEF_MIN_RESVPORT (665U)
+#define P9_DEF_MAX_RESVPORT (1023U)
+
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
+ * @pref: Preferences of this transport
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
+ * @close: member function to discard a connection on this transport
* @request: member function to issue a request to the transport
* @cancel: member function to cancel a request (if it hasn't been sent)
+ * @cancelled: member function to notify that a cancelled request will
+ * not receive a reply
*
* This is the basic API for a transport module which is registered by the
* transport module with the 9P core network module and used by the client
* to instantiate a new connection on a transport.
*
- * BUGS: the transport module list isn't protected.
+ * The transport module list is protected by v9fs_trans_lock.
*/
struct p9_trans_module {
@@ -53,11 +60,14 @@ struct p9_trans_module {
void (*close) (struct p9_client *);
int (*request) (struct p9_client *, struct p9_req_t *req);
int (*cancel) (struct p9_client *, struct p9_req_t *req);
+ int (*cancelled)(struct p9_client *, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *, struct p9_req_t *,
+ char *, char *, int , int, int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
void v9fs_unregister_trans(struct p9_trans_module *m);
-struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name);
+struct p9_trans_module *v9fs_get_trans_by_name(char *s);
struct p9_trans_module *v9fs_get_default_trans(void);
void v9fs_put_trans(struct p9_trans_module *m);
#endif /* NET_9P_TRANSPORT_H */
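
To make the shape of this interface concrete, a hedged registration sketch follows (every "demo" name is hypothetical, only members whose signatures appear in this header are filled in, and a real backend also provides .create/.close):

    static int p9_demo_request(struct p9_client *client, struct p9_req_t *req);
    static int p9_demo_cancel(struct p9_client *client, struct p9_req_t *req);

    static struct p9_trans_module p9_demo_trans = {
            .name    = "demo",
            .maxsize = 8192,
            .def     = 0,
            .request = p9_demo_request,
            .cancel  = p9_demo_cancel,
    };

    static int __init p9_demo_init(void)
    {
            v9fs_register_trans(&p9_demo_trans);
            return 0;
    }

    static void __exit p9_demo_exit(void)
    {
            v9fs_unregister_trans(&p9_demo_trans);
    }

    module_init(p9_demo_init);
    module_exit(p9_demo_exit);
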
diff --git a/include/net/Space.h b/include/net/Space.h
new file mode 100644
index 00000000000..8a32771e421
--- /dev/null
+++ b/include/net/Space.h
@@ -0,0 +1,31 @@
+/* A unified ethernet device probe. This is the easiest way to have every
+ * ethernet adaptor have the name "eth[0123...]".
+ */
+
+struct net_device *hp100_probe(int unit);
+struct net_device *ultra_probe(int unit);
+struct net_device *wd_probe(int unit);
+struct net_device *ne_probe(int unit);
+struct net_device *fmv18x_probe(int unit);
+struct net_device *i82596_probe(int unit);
+struct net_device *ni65_probe(int unit);
+struct net_device *sonic_probe(int unit);
+struct net_device *smc_init(int unit);
+struct net_device *atarilance_probe(int unit);
+struct net_device *sun3lance_probe(int unit);
+struct net_device *sun3_82586_probe(int unit);
+struct net_device *apne_probe(int unit);
+struct net_device *cs89x0_probe(int unit);
+struct net_device *mvme147lance_probe(int unit);
+struct net_device *tc515_probe(int unit);
+struct net_device *lance_probe(int unit);
+struct net_device *mac8390_probe(int unit);
+struct net_device *mac89x0_probe(int unit);
+struct net_device *cops_probe(int unit);
+struct net_device *ltpc_probe(void);
+
+/* Fibre Channel adapters */
+int iph5526_probe(struct net_device *dev);
+
+/* SBNI adapters */
+int sbni_probe(int unit);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index bab385f13ac..3ee4c92afd1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -9,7 +9,7 @@
#include <net/pkt_sched.h>
struct tcf_common {
- struct tcf_common *tcfc_next;
+ struct hlist_node tcfc_head;
u32 tcfc_index;
int tcfc_refcnt;
int tcfc_bindcnt;
@@ -18,11 +18,11 @@ struct tcf_common {
struct tcf_t tcfc_tm;
struct gnet_stats_basic_packed tcfc_bstats;
struct gnet_stats_queue tcfc_qstats;
- struct gnet_stats_rate_est tcfc_rate_est;
+ struct gnet_stats_rate_est64 tcfc_rate_est;
spinlock_t tcfc_lock;
struct rcu_head tcfc_rcu;
};
-#define tcf_next common.tcfc_next
+#define tcf_head common.tcfc_head
#define tcf_index common.tcfc_index
#define tcf_refcnt common.tcfc_refcnt
#define tcf_bindcnt common.tcfc_bindcnt
@@ -35,25 +35,11 @@ struct tcf_common {
#define tcf_lock common.tcfc_lock
#define tcf_rcu common.tcfc_rcu
-struct tcf_police {
- struct tcf_common common;
- int tcfp_result;
- u32 tcfp_ewma_rate;
- u32 tcfp_burst;
- u32 tcfp_mtu;
- u32 tcfp_toks;
- u32 tcfp_ptoks;
- psched_time_t tcfp_t_c;
- struct qdisc_rate_table *tcfp_R_tab;
- struct qdisc_rate_table *tcfp_P_tab;
-};
-#define to_police(pc) \
- container_of(pc, struct tcf_police, common)
-
struct tcf_hashinfo {
- struct tcf_common **htab;
+ struct hlist_head *htab;
unsigned int hmask;
- rwlock_t *lock;
+ spinlock_t lock;
+ u32 index;
};
static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
@@ -61,66 +47,80 @@ static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
return index & hmask;
}
+static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask)
+{
+ int i;
+
+ spin_lock_init(&hf->lock);
+ hf->index = 0;
+ hf->hmask = mask;
+ hf->htab = kzalloc((mask + 1) * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!hf->htab)
+ return -ENOMEM;
+ for (i = 0; i < mask + 1; i++)
+ INIT_HLIST_HEAD(&hf->htab[i]);
+ return 0;
+}
+
+static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
+{
+ kfree(hf->htab);
+}
+
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
#define ACT_P_DELETED 1
-struct tcf_act_hdr {
- struct tcf_common common;
-};
-
struct tc_action {
void *priv;
- struct tc_action_ops *ops;
+ const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
- struct tc_action *next;
+ struct list_head list;
};
-#define TCA_CAP_NONE 0
struct tc_action_ops {
- struct tc_action_ops *next;
+ struct list_head head;
struct tcf_hashinfo *hinfo;
char kind[IFNAMSIZ];
__u32 type; /* TBD to match kind */
- __u32 capab; /* capabilities includes 4 bit version */
struct module *owner;
- int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *);
- int (*get_stats)(struct sk_buff *, struct tc_action *);
+ int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
- int (*cleanup)(struct tc_action *, int bind);
+ void (*cleanup)(struct tc_action *, int bind);
int (*lookup)(struct tc_action *, u32);
- int (*init)(struct nlattr *, struct nlattr *, struct tc_action *, int , int);
+ int (*init)(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *act, int ovr,
+ int bind);
int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *);
};
-extern struct tcf_common *tcf_hash_lookup(u32 index,
- struct tcf_hashinfo *hinfo);
-extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo);
-extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
- int type, struct tc_action *a);
-extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_search(struct tc_action *a, u32 index);
-extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
- int bind, struct tcf_hashinfo *hinfo);
-extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size,
- int bind, u32 *idx_gen,
- struct tcf_hashinfo *hinfo);
-extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_search(struct tc_action *a, u32 index);
+void tcf_hash_destroy(struct tc_action *a);
+int tcf_hash_release(struct tc_action *a, int bind);
+u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
+int tcf_hash_check(u32 index, struct tc_action *a, int bind);
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+ int size, int bind);
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
+void tcf_hash_insert(struct tc_action *a);
-extern int tcf_register_action(struct tc_action_ops *a);
-extern int tcf_unregister_action(struct tc_action_ops *a);
-extern void tcf_action_destroy(struct tc_action *a, int bind);
-extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res);
-extern struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind);
-extern struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind);
-extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int);
+int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
+int tcf_unregister_action(struct tc_action_ops *a);
+int tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
+ struct tcf_result *res);
+int tcf_action_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *n, int ovr,
+ int bind, struct list_head *);
+struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *n, int ovr,
+ int bind);
+int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
+int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#endif /* CONFIG_NET_CLS_ACT */
#endif
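
As a reading aid for the reworked action API (a hedged sketch; the "myact" names are hypothetical and the remaining callbacks are omitted for brevity): an action module now passes its hash-table mask to tcf_register_action(), matching tcf_hashinfo_init() above, which allocates a table of mask + 1 buckets:

    #define MYACT_TAB_MASK 15       /* hypothetical hash mask */

    static int tcf_myact_act(struct sk_buff *skb, const struct tc_action *a,
                             struct tcf_result *res);
    static int tcf_myact_dump(struct sk_buff *skb, struct tc_action *a,
                              int bind, int ref);
    static int tcf_myact_init(struct net *net, struct nlattr *nla,
                              struct nlattr *est, struct tc_action *a,
                              int ovr, int bind);

    static struct tc_action_ops act_myact_ops = {
            .kind  = "myact",
            .owner = THIS_MODULE,
            .act   = tcf_myact_act,
            .dump  = tcf_myact_dump,
            .init  = tcf_myact_init,
    };

    static int __init myact_init_module(void)
    {
            return tcf_register_action(&act_myact_ops, MYACT_TAB_MASK);
    }

    static void __exit myact_cleanup_module(void)
    {
            tcf_unregister_action(&act_myact_ops);
    }

    module_init(myact_init_module);
    module_exit(myact_cleanup_module);
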
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 23710aa6a18..f679877bb60 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -8,13 +8,17 @@
#define TEMP_VALID_LIFETIME (7*86400)
#define TEMP_PREFERRED_LIFETIME (86400)
-#define REGEN_MAX_RETRY (5)
+#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
#define ADDR_CHECK_FREQUENCY (120*HZ)
#define IPV6_MAX_ADDRESSES 16
+#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ / 50 : 1)
+#define ADDRCONF_TIMER_FUZZ (HZ / 4)
+#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
+
#include <linux/in.h>
#include <linux/in6.h>
@@ -42,59 +46,50 @@ struct prefix_info {
};
-#ifdef __KERNEL__
-
#include <linux/netdevice.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>
-#define IN6_ADDR_HSIZE 16
+#define IN6_ADDR_HSIZE_SHIFT 4
+#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
-extern int addrconf_init(void);
-extern void addrconf_cleanup(void);
+int addrconf_init(void);
+void addrconf_cleanup(void);
-extern int addrconf_add_ifaddr(struct net *net,
- void __user *arg);
-extern int addrconf_del_ifaddr(struct net *net,
- void __user *arg);
-extern int addrconf_set_dstaddr(struct net *net,
- void __user *arg);
+int addrconf_add_ifaddr(struct net *net, void __user *arg);
+int addrconf_del_ifaddr(struct net *net, void __user *arg);
+int addrconf_set_dstaddr(struct net *net, void __user *arg);
-extern int ipv6_chk_addr(struct net *net,
- struct in6_addr *addr,
- struct net_device *dev,
- int strict);
+int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-extern int ipv6_chk_home_addr(struct net *net,
- struct in6_addr *addr);
+int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
#endif
-extern int ipv6_chk_prefix(struct in6_addr *addr,
- struct net_device *dev);
-
-extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
- const struct in6_addr *addr,
- struct net_device *dev,
- int strict);
-
-extern int ipv6_dev_get_saddr(struct net *net,
- struct net_device *dev,
- const struct in6_addr *daddr,
- unsigned int srcprefs,
- struct in6_addr *saddr);
-extern int ipv6_get_lladdr(struct net_device *dev,
- struct in6_addr *addr,
- unsigned char banned_flags);
-extern int ipv6_rcv_saddr_equal(const struct sock *sk,
- const struct sock *sk2);
-extern void addrconf_join_solict(struct net_device *dev,
- struct in6_addr *addr);
-extern void addrconf_leave_solict(struct inet6_dev *idev,
- struct in6_addr *addr);
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+ const unsigned int prefix_len,
+ struct net_device *dev);
+
+int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+
+struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
+ const struct in6_addr *addr,
+ struct net_device *dev, int strict);
+
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr);
+int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+ u32 banned_flags);
+int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ u32 banned_flags);
+int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
+void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
+void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
- unsigned unit)
+ unsigned int unit)
{
if (timeout == 0xffffffff)
return ~0UL;
@@ -118,60 +113,109 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
/*
* IPv6 Address Label subsystem (addrlabel.c)
*/
-extern int ipv6_addr_label_init(void);
-extern void ipv6_addr_label_cleanup(void);
-extern void ipv6_addr_label_rtnl_register(void);
-extern u32 ipv6_addr_label(struct net *net,
- const struct in6_addr *addr,
- int type, int ifindex);
+int ipv6_addr_label_init(void);
+void ipv6_addr_label_cleanup(void);
+void ipv6_addr_label_rtnl_register(void);
+u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
+ int type, int ifindex);
/*
* multicast prototypes (mcast.c)
*/
-extern int ipv6_sock_mc_join(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
-extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
-extern void ipv6_sock_mc_close(struct sock *sk);
-extern int inet6_mc_check(struct sock *sk,
- const struct in6_addr *mc_addr,
- const struct in6_addr *src_addr);
-
-extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
-extern void ipv6_mc_up(struct inet6_dev *idev);
-extern void ipv6_mc_down(struct inet6_dev *idev);
-extern void ipv6_mc_unmap(struct inet6_dev *idev);
-extern void ipv6_mc_remap(struct inet6_dev *idev);
-extern void ipv6_mc_init_dev(struct inet6_dev *idev);
-extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
-
-extern int ipv6_chk_mcast_addr(struct net_device *dev,
- const struct in6_addr *group,
- const struct in6_addr *src_addr);
-extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
-
-extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len);
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void ipv6_sock_mc_close(struct sock *sk);
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+ const struct in6_addr *src_addr);
+
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
+void ipv6_mc_up(struct inet6_dev *idev);
+void ipv6_mc_down(struct inet6_dev *idev);
+void ipv6_mc_unmap(struct inet6_dev *idev);
+void ipv6_mc_remap(struct inet6_dev *idev);
+void ipv6_mc_init_dev(struct inet6_dev *idev);
+void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+void addrconf_dad_failure(struct inet6_ifaddr *ifp);
+
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+ const struct in6_addr *src_addr);
+
+void ipv6_mc_dad_complete(struct inet6_dev *idev);
+
+/* A stub used by vxlan module. This is ugly, ideally these
+ * symbols should be built into the core kernel.
+ */
+struct ipv6_stub {
+ int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+ struct neigh_table *nd_tbl;
+};
+extern const struct ipv6_stub *ipv6_stub __read_mostly;
/*
- * anycast prototypes (anycast.c)
+ * identify MLD packets for MLD filter exceptions
*/
-extern int ipv6_sock_ac_join(struct sock *sk,int ifindex,struct in6_addr *addr);
-extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex,struct in6_addr *addr);
-extern void ipv6_sock_ac_close(struct sock *sk);
-extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex);
+static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
+{
+ struct icmp6hdr *hdr;
+
+ if (nexthdr != IPPROTO_ICMPV6 ||
+ !pskb_network_may_pull(skb, offset + sizeof(struct icmp6hdr)))
+ return false;
+
+ hdr = (struct icmp6hdr *)(skb_network_header(skb) + offset);
+
+ switch (hdr->icmp6_type) {
+ case ICMPV6_MGM_QUERY:
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MGM_REDUCTION:
+ case ICMPV6_MLD2_REPORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
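ipv6_is_mld() exists so that paths which filter multicast traffic can still let MLD signalling through. A hedged sketch of such a check; the caller name is illustrative, and "offset" is assumed to be the offset of the transport header within the packet, as in the in-tree callers:

/* Illustrative only: never filter MLD queries/reports, otherwise fall
 * back to the normal multicast filtering decision.
 */
static bool example_mcast_filter_allows(struct sk_buff *skb, int nexthdr,
					int offset, bool normal_decision)
{
	if (ipv6_is_mld(skb, nexthdr, offset))
		return true;

	return normal_decision;
}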
-extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr);
-extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr);
-extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
- struct in6_addr *addr);
+void addrconf_prefix_rcv(struct net_device *dev,
+ u8 *opt, int len, bool sllao);
+/*
+ * anycast prototypes (anycast.c)
+ */
+int ipv6_sock_ac_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void ipv6_sock_ac_close(struct sock *sk);
+
+int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+ const struct in6_addr *addr);
+bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
+ const struct in6_addr *addr);
/* Device notifier */
-extern int register_inet6addr_notifier(struct notifier_block *nb);
-extern int unregister_inet6addr_notifier(struct notifier_block *nb);
+int register_inet6addr_notifier(struct notifier_block *nb);
+int unregister_inet6addr_notifier(struct notifier_block *nb);
+int inet6addr_notifier_call_chain(unsigned long val, void *v);
+
+void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
+ struct ipv6_devconf *devconf);
/**
* __in6_dev_get - get inet6_dev pointer from netdevice
@@ -205,7 +249,14 @@ static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
return idev;
}
-extern void in6_dev_finish_destroy(struct inet6_dev *idev);
+static inline struct neigh_parms *__in6_dev_nd_parms_get_rcu(const struct net_device *dev)
+{
+ struct inet6_dev *idev = __in6_dev_get(dev);
+
+ return idev ? idev->nd_parms : NULL;
+}
+
+void in6_dev_finish_destroy(struct inet6_dev *idev);
static inline void in6_dev_put(struct inet6_dev *idev)
{
@@ -223,7 +274,7 @@ static inline void in6_dev_hold(struct inet6_dev *idev)
atomic_inc(&idev->refcnt);
}
-extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
{
@@ -255,34 +306,53 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
htonl(0xFF000000) | addr->s6_addr32[3]);
}
-static inline int ipv6_addr_is_multicast(const struct in6_addr *addr)
-{
- return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
-}
-
-static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
+static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__be64 *)addr;
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
+#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
addr->s6_addr32[1] | addr->s6_addr32[2] |
(addr->s6_addr32[3] ^ htonl(0x00000001))) == 0;
+#endif
}
-static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
+static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__be64 *)addr;
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
+#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
addr->s6_addr32[1] | addr->s6_addr32[2] |
(addr->s6_addr32[3] ^ htonl(0x00000002))) == 0;
+#endif
}
-static inline int ipv6_addr_is_isatap(const struct in6_addr *addr)
+static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
{
return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
}
-#ifdef CONFIG_PROC_FS
-extern int if6_proc_init(void);
-extern void if6_proc_exit(void);
+static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__be64 *)addr;
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
+ ((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
+ cpu_to_be64(0xffffffffff000000UL))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] |
+ (addr->s6_addr32[2] ^ htonl(0x00000001)) |
+ (addr->s6_addr[12] ^ 0xff)) == 0;
#endif
+}
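These predicates pair with addrconf_addr_solict_mult() earlier in this header: a unicast address is first mapped to its solicited-node multicast group, and incoming destinations can then be tested against it. A small sketch, assuming illustrative variable and function names:

#include <net/ipv6.h>
#include <net/addrconf.h>

/* Illustrative: does "daddr" match the solicited-node multicast address
 * derived from the locally configured address "own"?
 */
static bool example_is_my_solicited_node(const struct in6_addr *own,
					 const struct in6_addr *daddr)
{
	struct in6_addr solicited;

	addrconf_addr_solict_mult(own, &solicited);

	return ipv6_addr_is_solict_mult(daddr) &&
	       ipv6_addr_equal(daddr, &solicited);
}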
+#ifdef CONFIG_PROC_FS
+int if6_proc_init(void);
+void if6_proc_exit(void);
#endif
+
#endif
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index 75e64c7a296..085940f7eee 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -36,7 +36,7 @@ enum {
/* address length, octets */
#define IEEE802154_ADDR_LEN 8
-struct ieee802154_addr {
+struct ieee802154_addr_sa {
int addr_type;
u16 pan_id;
union {
@@ -51,12 +51,20 @@ struct ieee802154_addr {
struct sockaddr_ieee802154 {
sa_family_t family; /* AF_IEEE802154 */
- struct ieee802154_addr addr;
+ struct ieee802154_addr_sa addr;
};
/* get/setsockopt */
#define SOL_IEEE802154 0
-#define WPAN_WANTACK 0
+#define WPAN_WANTACK 0
+#define WPAN_SECURITY 1
+#define WPAN_SECURITY_LEVEL 2
+
+#define WPAN_SECURITY_DEFAULT 0
+#define WPAN_SECURITY_OFF 1
+#define WPAN_SECURITY_ON 2
+
+#define WPAN_SECURITY_LEVEL_DEFAULT (-1)
#endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 00c2eaa07c2..e797d45a5ae 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,8 +12,6 @@
#ifndef _NET_RXRPC_H
#define _NET_RXRPC_H
-#ifdef __KERNEL__
-
#include <linux/rxrpc.h>
struct rxrpc_call;
@@ -33,25 +31,21 @@ enum {
typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
struct sk_buff *);
-extern void rxrpc_kernel_intercept_rx_messages(struct socket *,
- rxrpc_interceptor_t);
-extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
- struct sockaddr_rxrpc *,
- struct key *,
- unsigned long,
- gfp_t);
-extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *,
- size_t);
-extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-extern void rxrpc_kernel_end_call(struct rxrpc_call *);
-extern bool rxrpc_kernel_is_data_last(struct sk_buff *);
-extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-extern int rxrpc_kernel_get_error_number(struct sk_buff *);
-extern void rxrpc_kernel_data_delivered(struct sk_buff *);
-extern void rxrpc_kernel_free_skb(struct sk_buff *);
-extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *,
- unsigned long);
-extern int rxrpc_kernel_reject_call(struct socket *);
+void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+ struct sockaddr_rxrpc *,
+ struct key *,
+ unsigned long,
+ gfp_t);
+int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
+void rxrpc_kernel_end_call(struct rxrpc_call *);
+bool rxrpc_kernel_is_data_last(struct sk_buff *);
+u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
+int rxrpc_kernel_get_error_number(struct sk_buff *);
+void rxrpc_kernel_data_delivered(struct sk_buff *);
+void rxrpc_kernel_free_skb(struct sk_buff *);
+struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
+int rxrpc_kernel_reject_call(struct socket *);
-#endif /* __KERNEL__ */
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 90c9e2872f2..a175ba4a7ad 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,29 +6,36 @@
#include <linux/mutex.h>
#include <net/sock.h>
-extern void unix_inflight(struct file *fp);
-extern void unix_notinflight(struct file *fp);
-extern void unix_gc(void);
-extern void wait_for_unix_gc(void);
+void unix_inflight(struct file *fp);
+void unix_notinflight(struct file *fp);
+void unix_gc(void);
+void wait_for_unix_gc(void);
+struct sock *unix_get_socket(struct file *filp);
+struct sock *unix_peer_get(struct sock *);
#define UNIX_HASH_SIZE 256
+#define UNIX_HASH_BITS 8
extern unsigned int unix_tot_inflight;
+extern spinlock_t unix_table_lock;
+extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
struct unix_address {
atomic_t refcnt;
int len;
- unsigned hash;
+ unsigned int hash;
struct sockaddr_un name[0];
};
struct unix_skb_parms {
struct pid *pid; /* Skb credentials */
- const struct cred *cred;
+ kuid_t uid;
+ kgid_t gid;
struct scm_fp_list *fp; /* Passed files */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Security ID */
#endif
+ u32 consumed;
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
@@ -40,34 +47,35 @@ struct unix_skb_parms {
spin_lock_nested(&unix_sk(s)->lock, \
SINGLE_DEPTH_NESTING)
-#ifdef __KERNEL__
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
struct unix_address *addr;
- struct dentry *dentry;
- struct vfsmount *mnt;
+ struct path path;
struct mutex readlock;
struct sock *peer;
- struct sock *other;
struct list_head link;
atomic_long_t inflight;
spinlock_t lock;
- unsigned int gc_candidate : 1;
- unsigned int gc_maybe_cycle : 1;
+ unsigned char recursion_level;
+ unsigned long gc_flags;
+#define UNIX_GC_CANDIDATE 0
+#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
#define peer_wait peer_wq.wait
+long unix_inq_len(struct sock *sk);
+long unix_outq_len(struct sock *sk);
+
#ifdef CONFIG_SYSCTL
-extern int unix_sysctl_register(struct net *net);
-extern void unix_sysctl_unregister(struct net *net);
+int unix_sysctl_register(struct net *net);
+void unix_sysctl_unregister(struct net *net);
#else
static inline int unix_sysctl_register(struct net *net) { return 0; }
static inline void unix_sysctl_unregister(struct net *net) {}
#endif
#endif
-#endif
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
new file mode 100644
index 00000000000..42827786940
--- /dev/null
+++ b/include/net/af_vsock.h
@@ -0,0 +1,179 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __AF_VSOCK_H__
+#define __AF_VSOCK_H__
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/vm_sockets.h>
+
+#include "vsock_addr.h"
+
+#define LAST_RESERVED_PORT 1023
+
+#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
+#define sk_vsock(__vsk) (&(__vsk)->sk)
+
+struct vsock_sock {
+ /* sk must be the first member. */
+ struct sock sk;
+ struct sockaddr_vm local_addr;
+ struct sockaddr_vm remote_addr;
+ /* Links for the global tables of bound and connected sockets. */
+ struct list_head bound_table;
+ struct list_head connected_table;
+ /* Accessed without the socket lock held. This means it can never be
+	 * modified outside of socket creation or destruction.
+ */
+ bool trusted;
+ bool cached_peer_allow_dgram; /* Dgram communication allowed to
+ * cached peer?
+ */
+ u32 cached_peer; /* Context ID of last dgram destination check. */
+ const struct cred *owner;
+ /* Rest are SOCK_STREAM only. */
+ long connect_timeout;
+ /* Listening socket that this came from. */
+ struct sock *listener;
+ /* Used for pending list and accept queue during connection handshake.
+ * The listening socket is the head for both lists. Sockets created
+ * for connection requests are placed in the pending list until they
+ * are connected, at which point they are put in the accept queue list
+ * so they can be accepted in accept(). If accept() cannot accept the
+ * connection, it is marked as rejected so the cleanup function knows
+ * to clean up the socket.
+ */
+ struct list_head pending_links;
+ struct list_head accept_queue;
+ bool rejected;
+ struct delayed_work dwork;
+ u32 peer_shutdown;
+ bool sent_request;
+ bool ignore_connecting_rst;
+
+ /* Private to transport. */
+ void *trans;
+};
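The pending_links/accept_queue pair described in the comment above gives the listener two stages: a child socket created for an incoming request sits on pending_links while the handshake runs, then moves to accept_queue once connected, where accept() dequeues it. A hedged sketch of that hand-off using the helpers declared at the end of this header (locking and wake-ups omitted):

/* Illustrative only: promote a child from the pending list to the
 * accept queue once its handshake has completed.
 */
static void example_handshake_done(struct sock *listener, struct sock *child)
{
	vsock_remove_pending(listener, child);
	vsock_enqueue_accept(listener, child);
	/* the listener is then woken so accept() can pick up "child" */
}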
+
+s64 vsock_stream_has_data(struct vsock_sock *vsk);
+s64 vsock_stream_has_space(struct vsock_sock *vsk);
+void vsock_pending_work(struct work_struct *work);
+struct sock *__vsock_create(struct net *net,
+ struct socket *sock,
+ struct sock *parent,
+ gfp_t priority, unsigned short type);
+
+/**** TRANSPORT ****/
+
+struct vsock_transport_recv_notify_data {
+ u64 data1; /* Transport-defined. */
+ u64 data2; /* Transport-defined. */
+ bool notify_on_block;
+};
+
+struct vsock_transport_send_notify_data {
+ u64 data1; /* Transport-defined. */
+ u64 data2; /* Transport-defined. */
+};
+
+struct vsock_transport {
+ /* Initialize/tear-down socket. */
+ int (*init)(struct vsock_sock *, struct vsock_sock *);
+ void (*destruct)(struct vsock_sock *);
+ void (*release)(struct vsock_sock *);
+
+ /* Connections. */
+ int (*connect)(struct vsock_sock *);
+
+ /* DGRAM. */
+ int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
+ int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
+ struct msghdr *msg, size_t len, int flags);
+ int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
+ struct iovec *, size_t len);
+ bool (*dgram_allow)(u32 cid, u32 port);
+
+ /* STREAM. */
+ /* TODO: stream_bind() */
+ ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
+ size_t len, int flags);
+ ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
+ size_t len);
+ s64 (*stream_has_data)(struct vsock_sock *);
+ s64 (*stream_has_space)(struct vsock_sock *);
+ u64 (*stream_rcvhiwat)(struct vsock_sock *);
+ bool (*stream_is_active)(struct vsock_sock *);
+ bool (*stream_allow)(u32 cid, u32 port);
+
+ /* Notification. */
+ int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
+ int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
+ int (*notify_recv_init)(struct vsock_sock *, size_t,
+ struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_pre_block)(struct vsock_sock *, size_t,
+ struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t,
+ struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t,
+ ssize_t, bool, struct vsock_transport_recv_notify_data *);
+ int (*notify_send_init)(struct vsock_sock *,
+ struct vsock_transport_send_notify_data *);
+ int (*notify_send_pre_block)(struct vsock_sock *,
+ struct vsock_transport_send_notify_data *);
+ int (*notify_send_pre_enqueue)(struct vsock_sock *,
+ struct vsock_transport_send_notify_data *);
+ int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
+ struct vsock_transport_send_notify_data *);
+
+ /* Shutdown. */
+ int (*shutdown)(struct vsock_sock *, int);
+
+ /* Buffer sizes. */
+ void (*set_buffer_size)(struct vsock_sock *, u64);
+ void (*set_min_buffer_size)(struct vsock_sock *, u64);
+ void (*set_max_buffer_size)(struct vsock_sock *, u64);
+ u64 (*get_buffer_size)(struct vsock_sock *);
+ u64 (*get_min_buffer_size)(struct vsock_sock *);
+ u64 (*get_max_buffer_size)(struct vsock_sock *);
+
+ /* Addressing. */
+ u32 (*get_local_cid)(void);
+};
+
+/**** CORE ****/
+
+int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
+static inline int vsock_core_init(const struct vsock_transport *t)
+{
+ return __vsock_core_init(t, THIS_MODULE);
+}
+void vsock_core_exit(void);
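A transport registers its callback table with the core through vsock_core_init() and unregisters with vsock_core_exit(). A heavily trimmed, hedged sketch of a module doing so; the ops table shown is deliberately incomplete, and a real transport must provide all mandatory callbacks:

#include <linux/module.h>
#include <net/af_vsock.h>

static u32 example_get_local_cid(void)
{
	return 42;			/* illustrative CID */
}

static const struct vsock_transport example_transport = {
	/* ... init/destruct/release plus dgram, stream and notify ops ... */
	.get_local_cid	= example_get_local_cid,
};

static int __init example_init(void)
{
	return vsock_core_init(&example_transport);
}

static void __exit example_exit(void)
{
	vsock_core_exit();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");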
+
+/**** UTILS ****/
+
+void vsock_release_pending(struct sock *pending);
+void vsock_add_pending(struct sock *listener, struct sock *pending);
+void vsock_remove_pending(struct sock *listener, struct sock *pending);
+void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
+void vsock_insert_connected(struct vsock_sock *vsk);
+void vsock_remove_bound(struct vsock_sock *vsk);
+void vsock_remove_connected(struct vsock_sock *vsk);
+struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
+struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
+ struct sockaddr_vm *dst);
+void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+
+#endif /* __AF_VSOCK_H__ */
diff --git a/include/net/ah.h b/include/net/ah.h
index f0129f79a31..ca95b98969d 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
#include <linux/skbuff.h>
/* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN 12
+#define MAX_AH_AUTH_LEN 64
struct crypto_ahash;
diff --git a/include/net/arp.h b/include/net/arp.h
index f4cf6ce6658..73c49864076 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -3,27 +3,64 @@
#define _ARP_H
#include <linux/if_arp.h>
+#include <linux/hash.h>
#include <net/neighbour.h>
extern struct neigh_table arp_tbl;
-extern void arp_init(void);
-extern int arp_find(unsigned char *haddr, struct sk_buff *skb);
-extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-extern void arp_send(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw,
- const unsigned char *src_hw, const unsigned char *th);
-extern int arp_bind_neighbour(struct dst_entry *dst);
-extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
-extern void arp_ifdown(struct net_device *dev);
-
-extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw,
- const unsigned char *src_hw,
- const unsigned char *target_hw);
-extern void arp_xmit(struct sk_buff *skb);
+static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
+{
+ u32 val = key ^ hash32_ptr(dev);
+
+ return val * hash_rnd;
+}
+
+static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+ struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
+ struct neighbour *n;
+ u32 hash_val;
+
+ hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
+ if (n->dev == dev && *(u32 *)n->primary_key == key)
+ return n;
+ }
+
+ return NULL;
+}
+
+static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
+{
+ struct neighbour *n;
+
+ rcu_read_lock_bh();
+ n = __ipv4_neigh_lookup_noref(dev, key);
+ if (n && !atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
+ rcu_read_unlock_bh();
+
+ return n;
+}
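The two lookups above differ only in reference handling: the _noref variant must run inside rcu_read_lock_bh() and never holds the neighbour, while __ipv4_neigh_lookup() takes a refcount that the caller must drop. A hedged sketch of the refcounted form (the caller name is illustrative):

#include <net/arp.h>
#include <net/neighbour.h>

/* Illustrative: look up the cached neighbour entry for a next hop. */
static void example_neigh_peek(struct net_device *dev, __be32 next_hop)
{
	struct neighbour *n;

	n = __ipv4_neigh_lookup(dev, (__force u32)next_hop);
	if (!n)
		return;			/* no cached entry */

	/* ... n->ha may be inspected here ... */

	neigh_release(n);		/* drop the reference the lookup took */
}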
+
+void arp_init(void);
+int arp_find(unsigned char *haddr, struct sk_buff *skb);
+int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
+void arp_send(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw, const unsigned char *th);
+int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
+void arp_ifdown(struct net_device *dev);
+
+struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw,
+ const unsigned char *target_hw);
+void arp_xmit(struct sk_buff *skb);
#endif /* _ARP_H */
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index 467c531b8a7..5865924d4aa 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -15,7 +15,6 @@
#define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
-#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
struct sk_buff;
@@ -36,26 +35,18 @@ struct clip_vcc {
struct atmarp_entry {
- __be32 ip; /* IP address */
struct clip_vcc *vccs; /* active VCCs; NULL if resolution is
pending */
unsigned long expires; /* entry expiration time */
struct neighbour *neigh; /* neighbour back-pointer */
};
-
#define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
-
struct clip_priv {
int number; /* for convenience ... */
spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */
struct net_device *next; /* next CLIP interface */
};
-
-#ifdef __KERNEL__
-extern struct neigh_table *clip_tbl_hook;
-#endif
-
#endif
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 206d22297ac..bf0396e9a5d 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -11,7 +11,7 @@
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define AX25_T1CLAMPLO 1
#define AX25_T1CLAMPHI (30 * HZ)
@@ -157,12 +157,12 @@ enum {
typedef struct ax25_uid_assoc {
struct hlist_node uid_node;
atomic_t refcount;
- uid_t uid;
+ kuid_t uid;
ax25_address call;
} ax25_uid_assoc;
-#define ax25_uid_for_each(__ax25, node, list) \
- hlist_for_each_entry(__ax25, node, list, uid_node)
+#define ax25_uid_for_each(__ax25, list) \
+ hlist_for_each_entry(__ax25, list, uid_node)
#define ax25_uid_hold(ax25) \
atomic_inc(&((ax25)->refcount))
@@ -195,7 +195,7 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
atomic_inc(&ax25_rt->refcount);
}
-extern void __ax25_put_route(ax25_route *ax25_rt);
+void __ax25_put_route(ax25_route *ax25_rt);
static inline void ax25_put_route(ax25_route *ax25_rt)
{
@@ -215,7 +215,7 @@ typedef struct ax25_dev {
struct ax25_dev *next;
struct net_device *dev;
struct net_device *forward;
- struct ctl_table *systable;
+ struct ctl_table_header *sysheader;
int values[AX25_MAX_VALUES];
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_dama_info dama;
@@ -247,8 +247,8 @@ typedef struct ax25_cb {
#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
-#define ax25_for_each(__ax25, node, list) \
- hlist_for_each_entry(__ax25, node, list, ax25_node)
+#define ax25_for_each(__ax25, list) \
+ hlist_for_each_entry(__ax25, list, ax25_node)
#define ax25_cb_hold(__ax25) \
atomic_inc(&((__ax25)->refcount))
@@ -272,30 +272,31 @@ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev
/* af_ax25.c */
extern struct hlist_head ax25_list;
extern spinlock_t ax25_list_lock;
-extern void ax25_cb_add(ax25_cb *);
+void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
-extern void ax25_destroy_socket(ax25_cb *);
-extern ax25_cb * __must_check ax25_create_cb(void);
-extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
-extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
+ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ struct net_device *);
+void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
+void ax25_destroy_socket(ax25_cb *);
+ax25_cb * __must_check ax25_create_cb(void);
+void ax25_fillin_cb(ax25_cb *, ax25_dev *);
+struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
/* ax25_addr.c */
extern const ax25_address ax25_bcast;
extern const ax25_address ax25_defaddr;
extern const ax25_address null_ax25_address;
-extern char *ax2asc(char *buf, const ax25_address *);
-extern void asc2ax(ax25_address *addr, const char *callsign);
-extern int ax25cmp(const ax25_address *, const ax25_address *);
-extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
-extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
+char *ax2asc(char *buf, const ax25_address *);
+void asc2ax(ax25_address *addr, const char *callsign);
+int ax25cmp(const ax25_address *, const ax25_address *);
+int ax25digicmp(const ax25_digi *, const ax25_digi *);
+const unsigned char *ax25_addr_parse(const unsigned char *, int,
ax25_address *, ax25_address *, ax25_digi *, int *, int *);
-extern int ax25_addr_build(unsigned char *, const ax25_address *,
- const ax25_address *, const ax25_digi *, int, int);
-extern int ax25_addr_size(const ax25_digi *);
-extern void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+int ax25_addr_build(unsigned char *, const ax25_address *,
+ const ax25_address *, const ax25_digi *, int, int);
+int ax25_addr_size(const ax25_digi *);
+void ax25_digi_invert(const ax25_digi *, ax25_digi *);
/* ax25_dev.c */
extern ax25_dev *ax25_dev_list;
@@ -306,33 +307,33 @@ static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
return dev->ax25_ptr;
}
-extern ax25_dev *ax25_addr_ax25dev(ax25_address *);
-extern void ax25_dev_device_up(struct net_device *);
-extern void ax25_dev_device_down(struct net_device *);
-extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
-extern struct net_device *ax25_fwd_dev(struct net_device *);
-extern void ax25_dev_free(void);
+ax25_dev *ax25_addr_ax25dev(ax25_address *);
+void ax25_dev_device_up(struct net_device *);
+void ax25_dev_device_down(struct net_device *);
+int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+struct net_device *ax25_fwd_dev(struct net_device *);
+void ax25_dev_free(void);
/* ax25_ds_in.c */
-extern int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
/* ax25_ds_subr.c */
-extern void ax25_ds_nr_error_recovery(ax25_cb *);
-extern void ax25_ds_enquiry_response(ax25_cb *);
-extern void ax25_ds_establish_data_link(ax25_cb *);
-extern void ax25_dev_dama_off(ax25_dev *);
-extern void ax25_dama_on(ax25_cb *);
-extern void ax25_dama_off(ax25_cb *);
+void ax25_ds_nr_error_recovery(ax25_cb *);
+void ax25_ds_enquiry_response(ax25_cb *);
+void ax25_ds_establish_data_link(ax25_cb *);
+void ax25_dev_dama_off(ax25_dev *);
+void ax25_dama_on(ax25_cb *);
+void ax25_dama_off(ax25_cb *);
/* ax25_ds_timer.c */
-extern void ax25_ds_setup_timer(ax25_dev *);
-extern void ax25_ds_set_timer(ax25_dev *);
-extern void ax25_ds_del_timer(ax25_dev *);
-extern void ax25_ds_timer(ax25_cb *);
-extern void ax25_ds_t1_timeout(ax25_cb *);
-extern void ax25_ds_heartbeat_expiry(ax25_cb *);
-extern void ax25_ds_t3timer_expiry(ax25_cb *);
-extern void ax25_ds_idletimer_expiry(ax25_cb *);
+void ax25_ds_setup_timer(ax25_dev *);
+void ax25_ds_set_timer(ax25_dev *);
+void ax25_ds_del_timer(ax25_dev *);
+void ax25_ds_timer(ax25_cb *);
+void ax25_ds_t1_timeout(ax25_cb *);
+void ax25_ds_heartbeat_expiry(ax25_cb *);
+void ax25_ds_t3timer_expiry(ax25_cb *);
+void ax25_ds_idletimer_expiry(ax25_cb *);
/* ax25_iface.c */
@@ -342,110 +343,112 @@ struct ax25_protocol {
int (*func)(struct sk_buff *, ax25_cb *);
};
-extern void ax25_register_pid(struct ax25_protocol *ap);
-extern void ax25_protocol_release(unsigned int);
+void ax25_register_pid(struct ax25_protocol *ap);
+void ax25_protocol_release(unsigned int);
struct ax25_linkfail {
struct hlist_node lf_node;
void (*func)(ax25_cb *, int);
};
-extern void ax25_linkfail_register(struct ax25_linkfail *lf);
-extern void ax25_linkfail_release(struct ax25_linkfail *lf);
-extern int __must_check ax25_listen_register(ax25_address *,
- struct net_device *);
-extern void ax25_listen_release(ax25_address *, struct net_device *);
-extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-extern int ax25_listen_mine(ax25_address *, struct net_device *);
-extern void ax25_link_failed(ax25_cb *, int);
-extern int ax25_protocol_is_registered(unsigned int);
+void ax25_linkfail_register(struct ax25_linkfail *lf);
+void ax25_linkfail_release(struct ax25_linkfail *lf);
+int __must_check ax25_listen_register(ax25_address *, struct net_device *);
+void ax25_listen_release(ax25_address *, struct net_device *);
+int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+int ax25_listen_mine(ax25_address *, struct net_device *);
+void ax25_link_failed(ax25_cb *, int);
+int ax25_protocol_is_registered(unsigned int);
/* ax25_in.c */
-extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
-extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
+int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
+ struct net_device *);
/* ax25_ip.c */
-extern int ax25_hard_header(struct sk_buff *, struct net_device *,
- unsigned short, const void *,
- const void *, unsigned int);
-extern int ax25_rebuild_header(struct sk_buff *);
+int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
+ const void *, const void *, unsigned int);
+int ax25_rebuild_header(struct sk_buff *);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_output(ax25_cb *, int, struct sk_buff *);
-extern void ax25_kick(ax25_cb *);
-extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
-extern void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
+ ax25_digi *, struct net_device *);
+void ax25_output(ax25_cb *, int, struct sk_buff *);
+void ax25_kick(ax25_cb *);
+void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+int ax25_check_iframes_acked(ax25_cb *, unsigned short);
/* ax25_route.c */
-extern void ax25_rt_device_down(struct net_device *);
-extern int ax25_rt_ioctl(unsigned int, void __user *);
+void ax25_rt_device_down(struct net_device *);
+int ax25_rt_ioctl(unsigned int, void __user *);
extern const struct file_operations ax25_route_fops;
-extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
-extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_rt_free(void);
+ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
+int ax25_rt_autobind(ax25_cb *, ax25_address *);
+struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
+ ax25_address *, ax25_digi *);
+void ax25_rt_free(void);
/* ax25_std_in.c */
-extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
/* ax25_std_subr.c */
-extern void ax25_std_nr_error_recovery(ax25_cb *);
-extern void ax25_std_establish_data_link(ax25_cb *);
-extern void ax25_std_transmit_enquiry(ax25_cb *);
-extern void ax25_std_enquiry_response(ax25_cb *);
-extern void ax25_std_timeout_response(ax25_cb *);
+void ax25_std_nr_error_recovery(ax25_cb *);
+void ax25_std_establish_data_link(ax25_cb *);
+void ax25_std_transmit_enquiry(ax25_cb *);
+void ax25_std_enquiry_response(ax25_cb *);
+void ax25_std_timeout_response(ax25_cb *);
/* ax25_std_timer.c */
-extern void ax25_std_heartbeat_expiry(ax25_cb *);
-extern void ax25_std_t1timer_expiry(ax25_cb *);
-extern void ax25_std_t2timer_expiry(ax25_cb *);
-extern void ax25_std_t3timer_expiry(ax25_cb *);
-extern void ax25_std_idletimer_expiry(ax25_cb *);
+void ax25_std_heartbeat_expiry(ax25_cb *);
+void ax25_std_t1timer_expiry(ax25_cb *);
+void ax25_std_t2timer_expiry(ax25_cb *);
+void ax25_std_t3timer_expiry(ax25_cb *);
+void ax25_std_idletimer_expiry(ax25_cb *);
/* ax25_subr.c */
-extern void ax25_clear_queues(ax25_cb *);
-extern void ax25_frames_acked(ax25_cb *, unsigned short);
-extern void ax25_requeue_frames(ax25_cb *);
-extern int ax25_validate_nr(ax25_cb *, unsigned short);
-extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
-extern void ax25_send_control(ax25_cb *, int, int, int);
-extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_calculate_t1(ax25_cb *);
-extern void ax25_calculate_rtt(ax25_cb *);
-extern void ax25_disconnect(ax25_cb *, int);
+void ax25_clear_queues(ax25_cb *);
+void ax25_frames_acked(ax25_cb *, unsigned short);
+void ax25_requeue_frames(ax25_cb *);
+int ax25_validate_nr(ax25_cb *, unsigned short);
+int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+void ax25_send_control(ax25_cb *, int, int, int);
+void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *,
+ ax25_digi *);
+void ax25_calculate_t1(ax25_cb *);
+void ax25_calculate_rtt(ax25_cb *);
+void ax25_disconnect(ax25_cb *, int);
/* ax25_timer.c */
-extern void ax25_setup_timers(ax25_cb *);
-extern void ax25_start_heartbeat(ax25_cb *);
-extern void ax25_start_t1timer(ax25_cb *);
-extern void ax25_start_t2timer(ax25_cb *);
-extern void ax25_start_t3timer(ax25_cb *);
-extern void ax25_start_idletimer(ax25_cb *);
-extern void ax25_stop_heartbeat(ax25_cb *);
-extern void ax25_stop_t1timer(ax25_cb *);
-extern void ax25_stop_t2timer(ax25_cb *);
-extern void ax25_stop_t3timer(ax25_cb *);
-extern void ax25_stop_idletimer(ax25_cb *);
-extern int ax25_t1timer_running(ax25_cb *);
-extern unsigned long ax25_display_timer(struct timer_list *);
+void ax25_setup_timers(ax25_cb *);
+void ax25_start_heartbeat(ax25_cb *);
+void ax25_start_t1timer(ax25_cb *);
+void ax25_start_t2timer(ax25_cb *);
+void ax25_start_t3timer(ax25_cb *);
+void ax25_start_idletimer(ax25_cb *);
+void ax25_stop_heartbeat(ax25_cb *);
+void ax25_stop_t1timer(ax25_cb *);
+void ax25_stop_t2timer(ax25_cb *);
+void ax25_stop_t3timer(ax25_cb *);
+void ax25_stop_idletimer(ax25_cb *);
+int ax25_t1timer_running(ax25_cb *);
+unsigned long ax25_display_timer(struct timer_list *);
/* ax25_uid.c */
extern int ax25_uid_policy;
-extern ax25_uid_assoc *ax25_findbyuid(uid_t);
-extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+ax25_uid_assoc *ax25_findbyuid(kuid_t);
+int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
extern const struct file_operations ax25_uid_fops;
-extern void ax25_uid_free(void);
+void ax25_uid_free(void);
/* sysctl_net_ax25.c */
#ifdef CONFIG_SYSCTL
-extern void ax25_register_sysctl(void);
-extern void ax25_unregister_sysctl(void);
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
#else
-static inline void ax25_register_sysctl(void) {};
-static inline void ax25_unregister_sysctl(void) {};
+static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
+static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
#endif /* CONFIG_SYSCTL */
#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index d81ea799770..904777c1cd2 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -1,4 +1,4 @@
-/*
+/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
@@ -12,30 +12,33 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#ifndef __BLUETOOTH_H
#define __BLUETOOTH_H
-#include <asm/types.h>
-#include <asm/byteorder.h>
-#include <linux/list.h>
#include <linux/poll.h>
#include <net/sock.h>
+#include <linux/seq_file.h>
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
#define PF_BLUETOOTH AF_BLUETOOTH
#endif
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1 1
+#define BLUETOOTH_VER_1_2 2
+#define BLUETOOTH_VER_2_0 3
+
/* Reserved for core and driver use */
#define BT_SKB_RESERVE 8
@@ -56,17 +59,74 @@
#define BT_SECURITY 4
struct bt_security {
__u8 level;
+ __u8 key_size;
};
#define BT_SECURITY_SDP 0
#define BT_SECURITY_LOW 1
#define BT_SECURITY_MEDIUM 2
#define BT_SECURITY_HIGH 3
+#define BT_SECURITY_FIPS 4
#define BT_DEFER_SETUP 7
-#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
-#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
-#define BT_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_FLUSHABLE 8
+
+#define BT_FLUSHABLE_OFF 0
+#define BT_FLUSHABLE_ON 1
+
+#define BT_POWER 9
+struct bt_power {
+ __u8 force_active;
+};
+#define BT_POWER_FORCE_ACTIVE_OFF 0
+#define BT_POWER_FORCE_ACTIVE_ON 1
+
+#define BT_CHANNEL_POLICY 10
+
+/* BR/EDR only (default policy)
+ * AMP controllers cannot be used.
+ * Channel move requests from the remote device are denied.
+ * If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_BREDR_ONLY 0
+
+/* BR/EDR Preferred
+ * Allow use of AMP controllers.
+ * If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ * Channel move requests from the remote device are allowed.
+ */
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1
+
+/* AMP Preferred
+ * Allow use of AMP controllers
+ * If the L2CAP channel is currently on BR/EDR and AMP controller
+ * resources are available, initiate a channel move to AMP.
+ * Channel move requests from the remote device are allowed.
+ * If the L2CAP socket has not been connected yet, try to create
+ * and configure the channel directly on an AMP controller rather
+ * than BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
+
+#define BT_VOICE 11
+struct bt_voice {
+ __u16 setting;
+};
+
+#define BT_VOICE_TRANSPARENT 0x0003
+#define BT_VOICE_CVSD_16BIT 0x0060
+
+#define BT_SNDMTU 12
+#define BT_RCVMTU 13
+
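The SOL_BLUETOOTH option codes above (BT_SECURITY through BT_RCVMTU) are exercised from userspace with get/setsockopt on a Bluetooth socket; BT_CHANNEL_POLICY, for instance, selects one of the three policies described in the comments. A hedged userspace sketch, assuming the libbluetooth copy of these constants and that the kernel rejects options the socket type does not support:

#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* userspace copy of these constants */

/* Illustrative: prefer BR/EDR but allow moves to an AMP controller. */
static int example_set_channel_policy(int sk)
{
	uint32_t policy = BT_CHANNEL_POLICY_BREDR_PREFERRED;

	return setsockopt(sk, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
			  &policy, sizeof(policy));
}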
+__printf(1, 2)
+int bt_info(const char *fmt, ...);
+__printf(1, 2)
+int bt_err(const char *fmt, ...);
+
+#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
+#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
/* Connection and socket states */
enum {
@@ -81,27 +141,80 @@ enum {
BT_CLOSED
};
+/* If unused, this will be removed by the compiler */
+static inline const char *state_to_string(int state)
+{
+ switch (state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
+
+ return "invalid state";
+}
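state_to_string() is meant purely for debug output; being a static inline, translation units that never call it carry no copy of it. A hedged sketch of the intended use:

/* Illustrative: trace a socket state transition in debug builds. */
static void example_set_state(struct sock *sk, int state)
{
	BT_DBG("sk %p %s -> %s", sk,
	       state_to_string(sk->sk_state), state_to_string(state));

	sk->sk_state = state;
}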
+
/* BD Address */
typedef struct {
__u8 b[6];
} __packed bdaddr_t;
-#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
-#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
+/* BD Address type */
+#define BDADDR_BREDR 0x00
+#define BDADDR_LE_PUBLIC 0x01
+#define BDADDR_LE_RANDOM 0x02
+
+static inline bool bdaddr_type_is_valid(__u8 type)
+{
+ switch (type) {
+ case BDADDR_BREDR:
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool bdaddr_type_is_le(__u8 type)
+{
+ switch (type) {
+ case BDADDR_LE_PUBLIC:
+ case BDADDR_LE_RANDOM:
+ return true;
+ }
+
+ return false;
+}
+
+#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
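With address types now carried next to the address itself, code taking an (address, type) pair from userspace is expected to validate the type and use BDADDR_ANY/BDADDR_NONE as sentinels. A hedged sketch; the policy shown is illustrative, not taken from the source:

/* Illustrative: sanity-check an address/type pair from a bind request. */
static int example_check_bind_addr(const bdaddr_t *bdaddr, __u8 type)
{
	if (!bdaddr_type_is_valid(type))
		return -EINVAL;

	/* illustrative rule: LE sockets here must bind a real address */
	if (bdaddr_type_is_le(type) && !bacmp(bdaddr, BDADDR_ANY))
		return -EINVAL;

	return 0;
}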
/* Copy, swap, convert BD Address */
-static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
+static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
{
return memcmp(ba1, ba2, sizeof(bdaddr_t));
}
-static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
+static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
{
memcpy(dst, src, sizeof(bdaddr_t));
}
void baswap(bdaddr_t *dst, bdaddr_t *src);
-char *batostr(bdaddr_t *ba);
-bdaddr_t *strtoba(char *str);
/* Common socket structures and functions */
@@ -109,41 +222,75 @@ bdaddr_t *strtoba(char *str);
struct bt_sock {
struct sock sk;
- bdaddr_t src;
- bdaddr_t dst;
struct list_head accept_q;
struct sock *parent;
- u32 defer_setup;
+ unsigned long flags;
+ void (*skb_msg_name)(struct sk_buff *, void *, int *);
+};
+
+enum {
+ BT_SK_DEFER_SETUP,
+ BT_SK_SUSPEND,
};
struct bt_sock_list {
struct hlist_head head;
rwlock_t lock;
+#ifdef CONFIG_PROC_FS
+	int (*custom_seq_show)(struct seq_file *, void *);
+#endif
};
int bt_sock_register(int proto, const struct net_proto_family *ops);
-int bt_sock_unregister(int proto);
+void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
-int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags);
+int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags);
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags);
-uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
+uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
void bt_accept_enqueue(struct sock *parent, struct sock *sk);
void bt_accept_unlink(struct sock *sk);
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
+struct l2cap_ctrl {
+ unsigned int sframe:1,
+ poll:1,
+ final:1,
+ fcs:1,
+ sar:2,
+ super:2;
+ __u16 reqseq;
+ __u16 txseq;
+ __u8 retries;
+};
+
+struct hci_dev;
+
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
+
+struct hci_req_ctrl {
+ bool start;
+ u8 event;
+ hci_req_complete_t complete;
+};
+
struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
__u16 expect;
- __u8 tx_seq;
- __u8 retries;
- __u8 sar;
+ __u8 force_active;
+ struct l2cap_chan *chan;
+ struct l2cap_ctrl control;
+ struct hci_req_ctrl req;
+ bdaddr_t bdaddr;
+ __le16 psm;
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -151,24 +298,24 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
struct sk_buff *skb;
- if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) {
+ skb = alloc_skb(len + BT_SKB_RESERVE, how);
+ if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
return skb;
}
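bt_skb_alloc() only differs from alloc_skb() in reserving BT_SKB_RESERVE bytes of headroom for the HCI packet-type byte and clearing the incoming flag in the control block. A hedged sketch of a sender using it (the packet type chosen is illustrative):

/* Illustrative: allocate an outgoing buffer and tag its packet type. */
static struct sk_buff *example_build_frame(unsigned int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;	/* defined in hci.h */
	skb_put(skb, len);			/* payload filled in by the caller */

	return skb;
}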
-static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len,
- int nb, int *err)
+static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
+ unsigned long len, int nb, int *err)
{
struct sk_buff *skb;
- release_sock(sk);
- if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
+ skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
+ if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
- lock_sock(sk);
if (!skb && *err)
return NULL;
@@ -189,14 +336,27 @@ out:
return NULL;
}
-int bt_err(__u16 code);
+int bt_to_errno(__u16 code);
-extern int hci_sock_init(void);
-extern void hci_sock_cleanup(void);
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
-extern int bt_sysfs_init(void);
-extern void bt_sysfs_cleanup(void);
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
+
+int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list *sk_list,
+ int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
extern struct dentry *bt_debugfs;
+int l2cap_init(void);
+void l2cap_exit(void);
+
+int sco_init(void);
+void sco_exit(void);
+
+void bt_sock_reclassify_lock(struct sock *sk, int proto);
+
#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index e30e0083434..16587dcd6a9 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,4 +1,4 @@
-/*
+/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
@@ -12,13 +12,13 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
@@ -30,6 +30,13 @@
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
+#define HCI_LINK_KEY_SIZE 16
+#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
+
+#define HCI_MAX_AMP_ASSOC_SIZE 672
+
+#define HCI_MAX_CSB_DATA_SIZE 252
+
/* HCI dev events */
#define HCI_DEV_REG 1
#define HCI_DEV_UNREG 2
@@ -56,11 +63,28 @@
#define HCI_BREDR 0x00
#define HCI_AMP 0x01
+/* First BR/EDR Controller shall have ID = 0 */
+#define AMP_ID_BREDR 0x00
+
+/* AMP controller types */
+#define AMP_TYPE_BREDR 0x00
+#define AMP_TYPE_80211 0x01
+
+/* AMP controller status */
+#define AMP_STATUS_POWERED_DOWN 0x00
+#define AMP_STATUS_BLUETOOTH_ONLY 0x01
+#define AMP_STATUS_NO_CAPACITY 0x02
+#define AMP_STATUS_LOW_CAPACITY 0x03
+#define AMP_STATUS_MEDIUM_CAPACITY 0x04
+#define AMP_STATUS_HIGH_CAPACITY 0x05
+#define AMP_STATUS_FULL_CAPACITY 0x06
+
/* HCI device quirks */
enum {
- HCI_QUIRK_NO_RESET,
+ HCI_QUIRK_RESET_ON_CLOSE,
HCI_QUIRK_RAW_DEVICE,
- HCI_QUIRK_FIXUP_BUFFER_SIZE
+ HCI_QUIRK_FIXUP_BUFFER_SIZE,
+ HCI_QUIRK_BROKEN_STORED_LINK_KEY,
};
/* HCI device flags */
@@ -76,8 +100,55 @@ enum {
HCI_INQUIRY,
HCI_RAW,
+
+ HCI_RESET,
+};
+
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+ HCI_SETUP,
+ HCI_AUTO_OFF,
+ HCI_RFKILLED,
+ HCI_MGMT,
+ HCI_PAIRABLE,
+ HCI_SERVICE_CACHE,
+ HCI_DEBUG_KEYS,
+ HCI_DUT_MODE,
+ HCI_FORCE_SC,
+ HCI_FORCE_STATIC_ADDR,
+ HCI_UNREGISTER,
+ HCI_USER_CHANNEL,
+
+ HCI_LE_SCAN,
+ HCI_SSP_ENABLED,
+ HCI_SC_ENABLED,
+ HCI_SC_ONLY,
+ HCI_PRIVACY,
+ HCI_RPA_EXPIRED,
+ HCI_RPA_RESOLVING,
+ HCI_HS_ENABLED,
+ HCI_LE_ENABLED,
+ HCI_ADVERTISING,
+ HCI_CONNECTABLE,
+ HCI_DISCOVERABLE,
+ HCI_LIMITED_DISCOVERABLE,
+ HCI_LINK_SECURITY,
+ HCI_PERIODIC_INQ,
+ HCI_FAST_CONNECTABLE,
+ HCI_BREDR_ENABLED,
+ HCI_6LOWPAN_ENABLED,
+ HCI_LE_SCAN_INTERRUPTED,
};
+/* A mask for the flags that are supposed to remain when a reset happens
+ * or the HCI device is closed.
+ */
+#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
+ BIT(HCI_FAST_CONNECTABLE))
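HCI_PERSISTENT_MASK names the dev_flags that survive a controller reset or close; everything else is cleared. A hedged sketch of how a reset path might apply it; the dev_flags field name is assumed from hci_core.h, and the in-tree helper may differ:

/* Illustrative only: drop every flag that is not meant to persist. */
static void example_clear_volatile_flags(struct hci_dev *hdev)
{
	hdev->dev_flags &= HCI_PERSISTENT_MASK;
}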
+
/* HCI ioctl defines */
#define HCIDEVUP _IOW('H', 201, int)
#define HCIDEVDOWN _IOW('H', 202, int)
@@ -106,11 +177,14 @@ enum {
#define HCIINQUIRY _IOR('H', 240, int)
/* HCI timeouts */
-#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
-#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
-#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
-#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
-#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
+#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
+#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
+#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
+#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -150,8 +224,10 @@ enum {
#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
/* ACL flags */
+#define ACL_START_NO_FLUSH 0x00
#define ACL_CONT 0x01
#define ACL_START 0x02
+#define ACL_COMPLETE 0x03
#define ACL_ACTIVE_BCAST 0x04
#define ACL_PICO_BCAST 0x08
@@ -159,6 +235,9 @@ enum {
#define SCO_LINK 0x00
#define ACL_LINK 0x01
#define ESCO_LINK 0x02
+/* Low Energy links do not have a defined link type. Use an invented one */
+#define LE_LINK 0x80
+#define AMP_LINK 0x81
/* LMP features */
#define LMP_3SLOT 0x01
@@ -182,18 +261,45 @@ enum {
#define LMP_CVSD 0x01
#define LMP_PSCHEME 0x02
#define LMP_PCONTROL 0x04
+#define LMP_TRANSPARENT 0x08
+#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
#define LMP_EV4 0x01
#define LMP_EV5 0x02
+#define LMP_NO_BREDR 0x20
+#define LMP_LE 0x40
#define LMP_SNIFF_SUBR 0x02
+#define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20
#define LMP_EDR_ESCO_3M 0x40
#define LMP_EDR_3S_ESCO 0x80
+#define LMP_EXT_INQ 0x01
+#define LMP_SIMUL_LE_BR 0x02
#define LMP_SIMPLE_PAIR 0x08
+#define LMP_NO_FLUSH 0x40
+
+#define LMP_LSTO 0x01
+#define LMP_INQ_TX_PWR 0x02
+#define LMP_EXTFEATURES 0x80
+
+/* Extended LMP features */
+#define LMP_CSB_MASTER 0x01
+#define LMP_CSB_SLAVE 0x02
+#define LMP_SYNC_TRAIN 0x04
+#define LMP_SYNC_SCAN 0x08
+
+#define LMP_SC 0x01
+#define LMP_PING 0x02
+
+/* Host features */
+#define LMP_HOST_SSP 0x01
+#define LMP_HOST_LE 0x02
+#define LMP_HOST_LE_BREDR 0x04
+#define LMP_HOST_SC 0x08
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -215,6 +321,7 @@ enum {
#define HCI_LM_TRUSTED 0x0008
#define HCI_LM_RELIABLE 0x0010
#define HCI_LM_SECURE 0x0020
+#define HCI_LM_FIPS 0x0040
/* Authentication types */
#define HCI_AT_NO_BONDING 0x00
@@ -224,7 +331,77 @@ enum {
#define HCI_AT_GENERAL_BONDING 0x04
#define HCI_AT_GENERAL_BONDING_MITM 0x05
+/* I/O capabilities */
+#define HCI_IO_DISPLAY_ONLY 0x00
+#define HCI_IO_DISPLAY_YESNO 0x01
+#define HCI_IO_KEYBOARD_ONLY 0x02
+#define HCI_IO_NO_INPUT_OUTPUT 0x03
+
+/* Link Key types */
+#define HCI_LK_COMBINATION 0x00
+#define HCI_LK_LOCAL_UNIT 0x01
+#define HCI_LK_REMOTE_UNIT 0x02
+#define HCI_LK_DEBUG_COMBINATION 0x03
+#define HCI_LK_UNAUTH_COMBINATION_P192 0x04
+#define HCI_LK_AUTH_COMBINATION_P192 0x05
+#define HCI_LK_CHANGED_COMBINATION 0x06
+#define HCI_LK_UNAUTH_COMBINATION_P256 0x07
+#define HCI_LK_AUTH_COMBINATION_P256 0x08
+/* The spec doesn't define types for SMP keys; the _MASTER suffix is implied */
+#define HCI_SMP_STK 0x80
+#define HCI_SMP_STK_SLAVE 0x81
+#define HCI_SMP_LTK 0x82
+#define HCI_SMP_LTK_SLAVE 0x83
+
+/* Long Term Key types */
+#define HCI_LTK_UNAUTH 0x00
+#define HCI_LTK_AUTH 0x01
+
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_AUTH_FAILURE 0x05
+#define HCI_ERROR_MEMORY_EXCEEDED 0x07
+#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_REMOTE_USER_TERM 0x13
+#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
+#define HCI_ERROR_REMOTE_POWER_OFF 0x15
+#define HCI_ERROR_LOCAL_HOST_TERM 0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+
+/* Flow control modes */
+#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
+#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
+
+/* The core spec defines 127 as the "not available" value */
+#define HCI_TX_POWER_INVALID 127
+
+/* Extended Inquiry Response field types */
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
+#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */
+#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+
+/* Low Energy Advertising Flags */
+#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
+#define LE_AD_GENERAL 0x02 /* General Discoverable */
+#define LE_AD_NO_BREDR 0x04 /* BR/EDR not supported */
+#define LE_AD_SIM_LE_BREDR_CTRL 0x08 /* Simultaneous LE & BR/EDR Controller */
+#define LE_AD_SIM_LE_BREDR_HOST 0x10 /* Simultaneous LE & BR/EDR Host */
+
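EIR and LE advertising data share the same length/type/value layout, so the EIR_* codes above are normally consumed by a short scan loop. A hedged parser sketch; bounds handling is simplified and the function name is illustrative:

/* Illustrative: walk EIR/AD data and dispatch on the field type. */
static void example_parse_eir(const u8 *eir, size_t eir_len)
{
	size_t offset = 0;

	while (offset < eir_len) {
		u8 field_len = eir[offset];

		/* a zero length ends the data; reject truncated fields */
		if (field_len == 0 || offset + field_len >= eir_len)
			break;

		switch (eir[offset + 1]) {
		case EIR_NAME_SHORT:
		case EIR_NAME_COMPLETE:
			/* name bytes: eir[offset + 2] .. eir[offset + field_len] */
			break;
		case EIR_TX_POWER:
			/* one signed byte of transmit power */
			break;
		}

		offset += field_len + 1;
	}
}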
/* ----- HCI Commands ---- */
+#define HCI_OP_NOP 0x0000
+
#define HCI_OP_INQUIRY 0x0401
struct hci_cp_inquiry {
__u8 lap[3];
@@ -234,6 +411,8 @@ struct hci_cp_inquiry {
#define HCI_OP_INQUIRY_CANCEL 0x0402
+#define HCI_OP_PERIODIC_INQ 0x0403
+
#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
#define HCI_OP_CREATE_CONN 0x0405
@@ -278,7 +457,7 @@ struct hci_cp_reject_conn_req {
#define HCI_OP_LINK_KEY_REPLY 0x040b
struct hci_cp_link_key_reply {
bdaddr_t bdaddr;
- __u8 link_key[16];
+ __u8 link_key[HCI_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
@@ -292,11 +471,19 @@ struct hci_cp_pin_code_reply {
__u8 pin_len;
__u8 pin_code[16];
} __packed;
+struct hci_rp_pin_code_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
struct hci_cp_pin_code_neg_reply {
bdaddr_t bdaddr;
} __packed;
+struct hci_rp_pin_code_neg_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
struct hci_cp_change_conn_ptype {
@@ -377,6 +564,134 @@ struct hci_cp_reject_sync_conn_req {
__u8 reason;
} __packed;
+#define HCI_OP_IO_CAPABILITY_REPLY 0x042b
+struct hci_cp_io_capability_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_REPLY 0x042c
+struct hci_cp_user_confirm_reply {
+ bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_user_confirm_reply {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
+
+#define HCI_OP_USER_PASSKEY_REPLY 0x042e
+struct hci_cp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f
+
+#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430
+struct hci_cp_remote_oob_data_reply {
+ bdaddr_t bdaddr;
+ __u8 hash[16];
+ __u8 randomizer[16];
+} __packed;
+
+#define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433
+struct hci_cp_remote_oob_data_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
+struct hci_cp_io_capability_neg_reply {
+ bdaddr_t bdaddr;
+ __u8 reason;
+} __packed;
+
+#define HCI_OP_CREATE_PHY_LINK 0x0435
+struct hci_cp_create_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_ACCEPT_PHY_LINK 0x0436
+struct hci_cp_accept_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_DISCONN_PHY_LINK 0x0437
+struct hci_cp_disconn_phy_link {
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+struct ext_flow_spec {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
+#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
+struct hci_cp_create_accept_logical_link {
+ __u8 phy_handle;
+ struct ext_flow_spec tx_flow_spec;
+ struct ext_flow_spec rx_flow_spec;
+} __packed;
+
+#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
+struct hci_cp_disconn_logical_link {
+ __le16 log_handle;
+} __packed;
+
+#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
+struct hci_cp_logical_link_cancel {
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+struct hci_rp_logical_link_cancel {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_OP_SET_CSB 0x0441
+struct hci_cp_set_csb {
+ __u8 enable;
+ __u8 lt_addr;
+ __u8 lpo_allowed;
+ __le16 packet_type;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 csb_sv_tout;
+} __packed;
+struct hci_rp_set_csb {
+ __u8 status;
+ __u8 lt_addr;
+ __le16 interval;
+} __packed;
+
+#define HCI_OP_START_SYNC_TRAIN 0x0443
+
+#define HCI_OP_REMOTE_OOB_EXT_DATA_REPLY 0x0445
+struct hci_cp_remote_oob_ext_data_reply {
+ bdaddr_t bdaddr;
+ __u8 hash192[16];
+ __u8 randomizer192[16];
+ __u8 hash256[16];
+ __u8 randomizer256[16];
+} __packed;
+
#define HCI_OP_SNIFF_MODE 0x0803
struct hci_cp_sniff_mode {
__le16 handle;
@@ -447,9 +762,6 @@ struct hci_cp_sniff_subrate {
} __packed;
#define HCI_OP_SET_EVENT_MASK 0x0c01
-struct hci_cp_set_event_mask {
- __u8 mask[8];
-} __packed;
#define HCI_OP_RESET 0x0c03
@@ -474,22 +786,30 @@ struct hci_cp_set_event_flt {
#define HCI_CONN_SETUP_AUTO_OFF 0x01
#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
+struct hci_cp_delete_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 delete_all;
+} __packed;
+
+#define HCI_MAX_NAME_LENGTH 248
+
#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
struct hci_cp_write_local_name {
- __u8 name[248];
+ __u8 name[HCI_MAX_NAME_LENGTH];
} __packed;
#define HCI_OP_READ_LOCAL_NAME 0x0c14
struct hci_rp_read_local_name {
__u8 status;
- __u8 name[248];
+ __u8 name[HCI_MAX_NAME_LENGTH];
} __packed;
#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
-#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
+#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
#define SCAN_DISABLED 0x00
#define SCAN_INQUIRY 0x01
#define SCAN_PAGE 0x02
@@ -537,6 +857,30 @@ struct hci_cp_host_buffer_size {
__le16 sco_max_pkt;
} __packed;
+#define HCI_OP_READ_NUM_SUPPORTED_IAC 0x0c38
+struct hci_rp_read_num_supported_iac {
+ __u8 status;
+ __u8 num_iac;
+} __packed;
+
+#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39
+
+#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a
+struct hci_cp_write_current_iac_lap {
+ __u8 num_iac;
+ __u8 iac_lap[6];
+} __packed;
+
+#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
+
+#define HCI_MAX_EIR_LENGTH 240
+
+#define HCI_OP_WRITE_EIR 0x0c52
+struct hci_cp_write_eir {
+ __u8 fec;
+ __u8 data[HCI_MAX_EIR_LENGTH];
+} __packed;
+
#define HCI_OP_READ_SSP_MODE 0x0c55
struct hci_rp_read_ssp_mode {
__u8 status;
@@ -548,6 +892,99 @@ struct hci_cp_write_ssp_mode {
__u8 mode;
} __packed;
+#define HCI_OP_READ_LOCAL_OOB_DATA 0x0c57
+struct hci_rp_read_local_oob_data {
+ __u8 status;
+ __u8 hash[16];
+ __u8 randomizer[16];
+} __packed;
+
+#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
+struct hci_rp_read_inq_rsp_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
+
+#define HCI_OP_READ_LOCATION_DATA 0x0c64
+
+#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
+struct hci_rp_read_flow_control_mode {
+ __u8 status;
+ __u8 mode;
+} __packed;
+
+#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
+struct hci_cp_write_le_host_supported {
+ __u8 le;
+ __u8 simul;
+} __packed;
+
+#define HCI_OP_SET_RESERVED_LT_ADDR 0x0c74
+struct hci_cp_set_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_set_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75
+struct hci_cp_delete_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_delete_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_SET_CSB_DATA 0x0c76
+struct hci_cp_set_csb_data {
+ __u8 lt_addr;
+ __u8 fragment;
+ __u8 data_length;
+ __u8 data[HCI_MAX_CSB_DATA_SIZE];
+} __packed;
+struct hci_rp_set_csb_data {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_READ_SYNC_TRAIN_PARAMS 0x0c77
+
+#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78
+struct hci_cp_write_sync_train_params {
+ __le16 interval_min;
+ __le16 interval_max;
+ __le32 sync_train_tout;
+ __u8 service_data;
+} __packed;
+struct hci_rp_write_sync_train_params {
+ __u8 status;
+ __le16 sync_train_int;
+} __packed;
+
+#define HCI_OP_READ_SC_SUPPORT 0x0c79
+struct hci_rp_read_sc_support {
+ __u8 status;
+ __u8 support;
+} __packed;
+
+#define HCI_OP_WRITE_SC_SUPPORT 0x0c7a
+struct hci_cp_write_sc_support {
+ __u8 support;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d
+struct hci_rp_read_local_oob_ext_data {
+ __u8 status;
+ __u8 hash192[16];
+ __u8 randomizer192[16];
+ __u8 hash256[16];
+ __u8 randomizer256[16];
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -571,6 +1008,9 @@ struct hci_rp_read_local_features {
} __packed;
#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
+struct hci_cp_read_local_ext_features {
+ __u8 page;
+} __packed;
struct hci_rp_read_local_ext_features {
__u8 status;
__u8 page;
@@ -593,6 +1033,264 @@ struct hci_rp_read_bd_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a
+struct hci_rp_read_data_block_size {
+ __u8 status;
+ __le16 max_acl_len;
+ __le16 block_len;
+ __le16 num_blocks;
+} __packed;
+
+#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
+struct hci_rp_read_page_scan_activity {
+ __u8 status;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
+struct hci_cp_write_page_scan_activity {
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_READ_TX_POWER 0x0c2d
+struct hci_cp_read_tx_power {
+ __le16 handle;
+ __u8 type;
+} __packed;
+struct hci_rp_read_tx_power {
+ __u8 status;
+ __le16 handle;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
+struct hci_rp_read_page_scan_type {
+ __u8 status;
+ __u8 type;
+} __packed;
+
+#define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47
+ #define PAGE_SCAN_TYPE_STANDARD 0x00
+ #define PAGE_SCAN_TYPE_INTERLACED 0x01
+
+#define HCI_OP_READ_RSSI 0x1405
+struct hci_cp_read_rssi {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_rssi {
+ __u8 status;
+ __le16 handle;
+ __s8 rssi;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
+struct hci_rp_read_local_amp_info {
+ __u8 status;
+ __u8 amp_status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le32 max_pdu;
+ __u8 amp_type;
+ __le16 pal_cap;
+ __le16 max_assoc_size;
+ __le32 max_flush_to;
+ __le32 be_flush_to;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
+struct hci_cp_read_local_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 max_len;
+} __packed;
+struct hci_rp_read_local_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+ __le16 rem_len;
+ __u8 frag[0];
+} __packed;
+
+#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
+struct hci_cp_write_remote_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 rem_len;
+ __u8 frag[0];
+} __packed;
+struct hci_rp_write_remote_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_OP_ENABLE_DUT_MODE 0x1803
+
+#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
+
+#define HCI_OP_LE_SET_EVENT_MASK 0x2001
+struct hci_cp_le_set_event_mask {
+ __u8 mask[8];
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE 0x2002
+struct hci_rp_le_read_buffer_size {
+ __u8 status;
+ __le16 le_mtu;
+ __u8 le_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_READ_LOCAL_FEATURES 0x2003
+struct hci_rp_le_read_local_features {
+ __u8 status;
+ __u8 features[8];
+} __packed;
+
+#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005
+
+#define HCI_OP_LE_SET_ADV_PARAM 0x2006
+struct hci_cp_le_set_adv_param {
+ __le16 min_interval;
+ __le16 max_interval;
+ __u8 type;
+ __u8 own_address_type;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 channel_map;
+ __u8 filter_policy;
+} __packed;
+
+#define HCI_OP_LE_READ_ADV_TX_POWER 0x2007
+struct hci_rp_le_read_adv_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
+
+#define HCI_MAX_AD_LENGTH 31
+
+#define HCI_OP_LE_SET_ADV_DATA 0x2008
+struct hci_cp_le_set_adv_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009
+struct hci_cp_le_set_scan_rsp_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
+
+#define LE_SCAN_PASSIVE 0x00
+#define LE_SCAN_ACTIVE 0x01
+
+#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
+struct hci_cp_le_set_scan_param {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+ __u8 own_address_type;
+ __u8 filter_policy;
+} __packed;
+
+#define LE_SCAN_DISABLE 0x00
+#define LE_SCAN_ENABLE 0x01
+#define LE_SCAN_FILTER_DUP_DISABLE 0x00
+#define LE_SCAN_FILTER_DUP_ENABLE 0x01
+
+#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
+struct hci_cp_le_set_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+} __packed;
+
+#define HCI_LE_USE_PEER_ADDR 0x00
+#define HCI_LE_USE_WHITELIST 0x01
+
+#define HCI_OP_LE_CREATE_CONN 0x200d
+struct hci_cp_le_create_conn {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __u8 filter_policy;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 own_address_type;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
+
+#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
+struct hci_rp_le_read_white_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010
+
+#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011
+struct hci_cp_le_add_to_white_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012
+struct hci_cp_le_del_from_white_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_CONN_UPDATE 0x2013
+struct hci_cp_le_conn_update {
+ __le16 handle;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_START_ENC 0x2019
+struct hci_cp_le_start_enc {
+ __le16 handle;
+ __le64 rand;
+ __le16 ediv;
+ __u8 ltk[16];
+} __packed;
+
+#define HCI_OP_LE_LTK_REPLY 0x201a
+struct hci_cp_le_ltk_reply {
+ __le16 handle;
+ __u8 ltk[16];
+} __packed;
+struct hci_rp_le_ltk_reply {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_LTK_NEG_REPLY 0x201b
+struct hci_cp_le_ltk_neg_reply {
+ __le16 handle;
+} __packed;
+struct hci_rp_le_ltk_neg_reply {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_SUPPORTED_STATES 0x201c
+struct hci_rp_le_read_supported_states {
+ __u8 status;
+ __u8 le_states[8];
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -639,7 +1337,7 @@ struct hci_ev_auth_complete {
struct hci_ev_remote_name {
__u8 status;
bdaddr_t bdaddr;
- __u8 name[248];
+ __u8 name[HCI_MAX_NAME_LENGTH];
} __packed;
#define HCI_EV_ENCRYPT_CHANGE 0x08
@@ -706,9 +1404,14 @@ struct hci_ev_role_change {
} __packed;
#define HCI_EV_NUM_COMP_PKTS 0x13
+struct hci_comp_pkts_info {
+ __le16 handle;
+ __le16 count;
+} __packed;
+
struct hci_ev_num_comp_pkts {
__u8 num_hndl;
- /* variable length part */
+ struct hci_comp_pkts_info handles[0];
} __packed;
#define HCI_EV_MODE_CHANGE 0x14
@@ -732,7 +1435,7 @@ struct hci_ev_link_key_req {
#define HCI_EV_LINK_KEY_NOTIFY 0x18
struct hci_ev_link_key_notify {
bdaddr_t bdaddr;
- __u8 link_key[16];
+ __u8 link_key[HCI_LINK_KEY_SIZE];
__u8 key_type;
} __packed;
@@ -828,23 +1531,171 @@ struct extended_inquiry_info {
__u8 data[240];
} __packed;
+#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
+struct hci_ev_key_refresh_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
#define HCI_EV_IO_CAPA_REQUEST 0x31
struct hci_ev_io_capa_request {
bdaddr_t bdaddr;
} __packed;
+#define HCI_EV_IO_CAPA_REPLY 0x32
+struct hci_ev_io_capa_reply {
+ bdaddr_t bdaddr;
+ __u8 capability;
+ __u8 oob_data;
+ __u8 authentication;
+} __packed;
+
+#define HCI_EV_USER_CONFIRM_REQUEST 0x33
+struct hci_ev_user_confirm_req {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_EV_USER_PASSKEY_REQUEST 0x34
+struct hci_ev_user_passkey_req {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
+struct hci_ev_remote_oob_data_request {
+ bdaddr_t bdaddr;
+} __packed;
+
#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
struct hci_ev_simple_pair_complete {
__u8 status;
bdaddr_t bdaddr;
} __packed;
+#define HCI_EV_USER_PASSKEY_NOTIFY 0x3b
+struct hci_ev_user_passkey_notify {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_KEYPRESS_STARTED 0
+#define HCI_KEYPRESS_ENTERED 1
+#define HCI_KEYPRESS_ERASED 2
+#define HCI_KEYPRESS_CLEARED 3
+#define HCI_KEYPRESS_COMPLETED 4
+
+#define HCI_EV_KEYPRESS_NOTIFY 0x3c
+struct hci_ev_keypress_notify {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+
#define HCI_EV_REMOTE_HOST_FEATURES 0x3d
struct hci_ev_remote_host_features {
bdaddr_t bdaddr;
__u8 features[8];
} __packed;
+#define HCI_EV_LE_META 0x3e
+struct hci_ev_le_meta {
+ __u8 subevent;
+} __packed;
+
+#define HCI_EV_PHY_LINK_COMPLETE 0x40
+struct hci_ev_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_CHANNEL_SELECTED 0x41
+struct hci_ev_channel_selected {
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
+struct hci_ev_disconn_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
+struct hci_ev_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
+struct hci_ev_disconn_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_NUM_COMP_BLOCKS 0x48
+struct hci_comp_blocks_info {
+ __le16 handle;
+ __le16 pkts;
+ __le16 blocks;
+} __packed;
+
+struct hci_ev_num_comp_blocks {
+ __le16 num_blocks;
+ __u8 num_hndl;
+ struct hci_comp_blocks_info handles[0];
+} __packed;
+
+#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
+struct hci_ev_sync_train_complete {
+ __u8 status;
+} __packed;
+
+#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+
+/* Low energy meta events */
+#define LE_CONN_ROLE_MASTER 0x00
+
+#define HCI_EV_LE_CONN_COMPLETE 0x01
+struct hci_ev_le_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accurancy;
+} __packed;
+
+#define HCI_EV_LE_LTK_REQ 0x05
+struct hci_ev_le_ltk_req {
+ __le16 handle;
+ __le64 rand;
+ __le16 ediv;
+} __packed;
+
+/* Advertising report event types */
+#define LE_ADV_IND 0x00
+#define LE_ADV_DIRECT_IND 0x01
+#define LE_ADV_SCAN_IND 0x02
+#define LE_ADV_NONCONN_IND 0x03
+#define LE_ADV_SCAN_RSP 0x04
+
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+
+#define HCI_EV_LE_ADVERTISING_REPORT 0x02
+struct hci_ev_le_advertising_info {
+ __u8 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
@@ -874,7 +1725,7 @@ struct hci_ev_si_security {
struct hci_command_hdr {
__le16 opcode; /* OCF & OGF */
- __u8 plen;
+ __u8 plen;
} __packed;
struct hci_event_hdr {
@@ -892,8 +1743,6 @@ struct hci_sco_hdr {
__u8 dlen;
} __packed;
-#ifdef __KERNEL__
-#include <linux/skbuff.h>
static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
return (struct hci_event_hdr *) skb->data;
@@ -908,15 +1757,14 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
{
return (struct hci_sco_hdr *) skb->data;
}
-#endif
/* Command opcode pack/unpack */
-#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10))
+#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
#define hci_opcode_ocf(op) (op & 0x03ff)
/* ACL handle and flags pack/unpack */
-#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12))
+#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
#define hci_handle(h) (h & 0x0fff)
#define hci_flags(h) (h >> 12)
@@ -934,9 +1782,15 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
struct sockaddr_hci {
sa_family_t hci_family;
unsigned short hci_dev;
+ unsigned short hci_channel;
};
#define HCI_DEV_NONE 0xffff
+#define HCI_CHANNEL_RAW 0
+#define HCI_CHANNEL_USER 1
+#define HCI_CHANNEL_MONITOR 2
+#define HCI_CHANNEL_CONTROL 3
+
struct hci_filter {
unsigned long type_mask;
unsigned long event_mask[2];
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ebec8c9a929..b386bf17e6c 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -27,9 +27,8 @@
#include <net/bluetooth/hci.h>
-/* HCI upper protocols */
-#define HCI_PROTO_L2CAP 0
-#define HCI_PROTO_SCO 1
+/* HCI priority */
+#define HCI_PRIO_MAX 7
/* HCI Core structures */
struct inquiry_data {
@@ -44,33 +43,126 @@ struct inquiry_data {
};
struct inquiry_entry {
- struct inquiry_entry *next;
+ struct list_head all; /* inq_cache.all */
+ struct list_head list; /* unknown or resolve */
+ enum {
+ NAME_NOT_KNOWN,
+ NAME_NEEDED,
+ NAME_PENDING,
+ NAME_KNOWN,
+ } name_state;
__u32 timestamp;
struct inquiry_data data;
};
-struct inquiry_cache {
- spinlock_t lock;
+struct discovery_state {
+ int type;
+ enum {
+ DISCOVERY_STOPPED,
+ DISCOVERY_STARTING,
+ DISCOVERY_FINDING,
+ DISCOVERY_RESOLVING,
+ DISCOVERY_STOPPING,
+ } state;
+ struct list_head all; /* All devices found during inquiry */
+ struct list_head unknown; /* Name state not known */
+ struct list_head resolve; /* Name needs to be resolved */
__u32 timestamp;
- struct inquiry_entry *list;
+ bdaddr_t last_adv_addr;
+ u8 last_adv_addr_type;
+ s8 last_adv_rssi;
+ u8 last_adv_data[HCI_MAX_AD_LENGTH];
+ u8 last_adv_data_len;
};
struct hci_conn_hash {
struct list_head list;
- spinlock_t lock;
unsigned int acl_num;
+ unsigned int amp_num;
unsigned int sco_num;
+ unsigned int le_num;
};
struct bdaddr_list {
struct list_head list;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
};
+
+struct bt_uuid {
+ struct list_head list;
+ u8 uuid[16];
+ u8 size;
+ u8 svc_hint;
+};
+
+struct smp_csrk {
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 master;
+ u8 val[16];
+};
+
+struct smp_ltk {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 authenticated;
+ u8 type;
+ u8 enc_size;
+ __le16 ediv;
+ __le64 rand;
+ u8 val[16];
+};
+
+struct smp_irk {
+ struct list_head list;
+ bdaddr_t rpa;
+ bdaddr_t bdaddr;
+ u8 addr_type;
+ u8 val[16];
+};
+
+struct link_key {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 type;
+ u8 val[HCI_LINK_KEY_SIZE];
+ u8 pin_len;
+};
+
+struct oob_data {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 hash192[16];
+ u8 randomizer192[16];
+ u8 hash256[16];
+ u8 randomizer256[16];
+};
+
+#define HCI_MAX_SHORT_NAME_LENGTH 10
+
+/* Default LE RPA expiry time, 15 minutes */
+#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
+
+/* Default min/max age of connection information (1s/3s) */
+#define DEFAULT_CONN_INFO_MIN_AGE 1000
+#define DEFAULT_CONN_INFO_MAX_AGE 3000
+
+struct amp_assoc {
+ __u16 len;
+ __u16 offset;
+ __u16 rem_len;
+ __u16 len_so_far;
+ __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
+};
+
+#define HCI_MAX_PAGES 3
+
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
- spinlock_t lock;
- atomic_t refcnt;
+ struct mutex lock;
char name[8];
unsigned long flags;
@@ -78,15 +170,48 @@ struct hci_dev {
__u8 bus;
__u8 dev_type;
bdaddr_t bdaddr;
- __u8 dev_name[248];
+ bdaddr_t random_addr;
+ bdaddr_t static_addr;
+ __u8 adv_addr_type;
+ __u8 dev_name[HCI_MAX_NAME_LENGTH];
+ __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
+ __u8 eir[HCI_MAX_EIR_LENGTH];
__u8 dev_class[3];
- __u8 features[8];
+ __u8 major_class;
+ __u8 minor_class;
+ __u8 max_page;
+ __u8 features[HCI_MAX_PAGES][8];
+ __u8 le_features[8];
+ __u8 le_white_list_size;
+ __u8 le_states[8];
__u8 commands[64];
- __u8 ssp_mode;
__u8 hci_ver;
__u16 hci_rev;
+ __u8 lmp_ver;
__u16 manufacturer;
+ __u16 lmp_subver;
__u16 voice_setting;
+ __u8 num_iac;
+ __u8 io_capability;
+ __s8 inq_tx_power;
+ __u16 page_scan_interval;
+ __u16 page_scan_window;
+ __u8 page_scan_type;
+ __u8 le_adv_channel_map;
+ __u8 le_scan_type;
+ __u16 le_scan_interval;
+ __u16 le_scan_window;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __u16 discov_interleaved_timeout;
+ __u16 conn_info_min_age;
+ __u16 conn_info_max_age;
+ __u8 ssp_debug_mode;
+
+ __u16 devid_source;
+ __u16 devid_vendor;
+ __u16 devid_product;
+ __u16 devid_version;
__u16 pkt_type;
__u16 esco_type;
@@ -97,31 +222,68 @@ struct hci_dev {
__u16 sniff_min_interval;
__u16 sniff_max_interval;
+ __u8 amp_status;
+ __u32 amp_total_bw;
+ __u32 amp_max_bw;
+ __u32 amp_min_latency;
+ __u32 amp_max_pdu;
+ __u8 amp_type;
+ __u16 amp_pal_cap;
+ __u16 amp_assoc_size;
+ __u32 amp_max_flush_to;
+ __u32 amp_be_flush_to;
+
+ struct amp_assoc loc_assoc;
+
+ __u8 flow_ctl_mode;
+
+ unsigned int auto_accept_delay;
+
unsigned long quirks;
atomic_t cmd_cnt;
unsigned int acl_cnt;
unsigned int sco_cnt;
+ unsigned int le_cnt;
unsigned int acl_mtu;
unsigned int sco_mtu;
+ unsigned int le_mtu;
unsigned int acl_pkts;
unsigned int sco_pkts;
+ unsigned int le_pkts;
+
+ __u16 block_len;
+ __u16 block_mtu;
+ __u16 num_blocks;
+ __u16 block_cnt;
- unsigned long cmd_last_tx;
unsigned long acl_last_tx;
unsigned long sco_last_tx;
+ unsigned long le_last_tx;
struct workqueue_struct *workqueue;
+ struct workqueue_struct *req_workqueue;
+
+ struct work_struct power_on;
+ struct delayed_work power_off;
+
+ __u16 discov_timeout;
+ struct delayed_work discov_off;
- struct tasklet_struct cmd_task;
- struct tasklet_struct rx_task;
- struct tasklet_struct tx_task;
+ struct delayed_work service_cache;
+
+ struct timer_list cmd_timer;
+
+ struct work_struct rx_work;
+ struct work_struct cmd_work;
+ struct work_struct tx_work;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
+ struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
struct sk_buff *reassembly[NUM_REASSEMBLY];
@@ -130,115 +292,198 @@ struct hci_dev {
__u32 req_status;
__u32 req_result;
- struct inquiry_cache inq_cache;
+ struct crypto_blkcipher *tfm_aes;
+
+ struct discovery_state discovery;
struct hci_conn_hash conn_hash;
+
+ struct list_head mgmt_pending;
struct list_head blacklist;
+ struct list_head uuids;
+ struct list_head link_keys;
+ struct list_head long_term_keys;
+ struct list_head identity_resolving_keys;
+ struct list_head remote_oob_data;
+ struct list_head le_white_list;
+ struct list_head le_conn_params;
+ struct list_head pend_le_conns;
struct hci_dev_stats stat;
- struct sk_buff_head driver_init;
-
- void *driver_data;
- void *core_data;
-
- atomic_t promisc;
+ atomic_t promisc;
struct dentry *debugfs;
- struct device *parent;
struct device dev;
struct rfkill *rfkill;
- struct module *owner;
+ unsigned long dev_flags;
+
+ struct delayed_work le_scan_disable;
+
+ __s8 adv_tx_power;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data_len;
+
+ __u8 irk[16];
+ __u32 rpa_timeout;
+ struct delayed_work rpa_expired;
+ bdaddr_t rpa;
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
- int (*send)(struct sk_buff *skb);
- void (*destruct)(struct hci_dev *hdev);
+ int (*setup)(struct hci_dev *hdev);
+ int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
- int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
+#define HCI_PHY_HANDLE(handle) (handle & 0xff)
+
struct hci_conn {
struct list_head list;
- atomic_t refcnt;
- spinlock_t lock;
-
- bdaddr_t dst;
- __u16 handle;
- __u16 state;
- __u8 mode;
- __u8 type;
- __u8 out;
- __u8 attempt;
- __u8 dev_class[3];
- __u8 features[8];
- __u8 ssp_mode;
- __u16 interval;
- __u16 pkt_type;
- __u16 link_policy;
- __u32 link_mode;
- __u8 auth_type;
- __u8 sec_level;
- __u8 power_save;
- __u16 disc_timeout;
- unsigned long pend;
-
- unsigned int sent;
+ atomic_t refcnt;
- struct sk_buff_head data_q;
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
+ bdaddr_t init_addr;
+ __u8 init_addr_type;
+ bdaddr_t resp_addr;
+ __u8 resp_addr_type;
+ __u16 handle;
+ __u16 state;
+ __u8 mode;
+ __u8 type;
+ bool out;
+ __u8 attempt;
+ __u8 dev_class[3];
+ __u8 features[HCI_MAX_PAGES][8];
+ __u16 pkt_type;
+ __u16 link_policy;
+ __u32 link_mode;
+ __u8 key_type;
+ __u8 auth_type;
+ __u8 sec_level;
+ __u8 pending_sec_level;
+ __u8 pin_length;
+ __u8 enc_key_size;
+ __u8 io_capability;
+ __u32 passkey_notify;
+ __u8 passkey_entered;
+ __u16 disc_timeout;
+ __u16 setting;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __s8 rssi;
+ __s8 tx_power;
+ __s8 max_tx_power;
+ unsigned long flags;
+
+ unsigned long conn_info_timestamp;
+
+ __u8 remote_cap;
+ __u8 remote_auth;
+ __u8 remote_id;
+ bool flush_key;
- struct timer_list disc_timer;
- struct timer_list idle_timer;
+ unsigned int sent;
- struct work_struct work_add;
- struct work_struct work_del;
+ struct sk_buff_head data_q;
+ struct list_head chan_list;
+
+ struct delayed_work disc_work;
+ struct delayed_work auto_accept_work;
+ struct delayed_work idle_work;
+ struct delayed_work le_conn_timeout;
struct device dev;
- atomic_t devref;
struct hci_dev *hdev;
void *l2cap_data;
void *sco_data;
- void *priv;
+ void *smp_conn;
+ struct amp_mgr *amp_mgr;
struct hci_conn *link;
+
+ void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
+ void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
+};
+
+struct hci_chan {
+ struct list_head list;
+ __u16 handle;
+ struct hci_conn *conn;
+ struct sk_buff_head data_q;
+ unsigned int sent;
+ __u8 state;
+};
+
+struct hci_conn_params {
+ struct list_head list;
+
+ bdaddr_t addr;
+ u8 addr_type;
+
+ u16 conn_min_interval;
+ u16 conn_max_interval;
+
+ enum {
+ HCI_AUTO_CONN_DISABLED,
+ HCI_AUTO_CONN_ALWAYS,
+ HCI_AUTO_CONN_LINK_LOSS,
+ } auto_connect;
};
-extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
-/* ----- Inquiry cache ----- */
-#define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
-#define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
+/* ----- HCI interface to upper protocols ----- */
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
-#define inquiry_cache_lock(c) spin_lock(&c->lock)
-#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
-#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
-#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
-static inline void inquiry_cache_init(struct hci_dev *hdev)
+/* ----- Inquiry cache ----- */
+#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
+#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
+
+static inline void discovery_init(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
- spin_lock_init(&c->lock);
- c->list = NULL;
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ INIT_LIST_HEAD(&hdev->discovery.all);
+ INIT_LIST_HEAD(&hdev->discovery.unknown);
+ INIT_LIST_HEAD(&hdev->discovery.resolve);
}
+bool hci_discovery_active(struct hci_dev *hdev);
+
+void hci_discovery_set_state(struct hci_dev *hdev, int state);
+
static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
- return c->list == NULL;
+ return list_empty(&hdev->discovery.all);
}
static inline long inquiry_cache_age(struct hci_dev *hdev)
{
- struct inquiry_cache *c = &hdev->inq_cache;
+ struct discovery_state *c = &hdev->discovery;
return jiffies - c->timestamp;
}
@@ -247,96 +492,182 @@ static inline long inquiry_entry_age(struct inquiry_entry *e)
return jiffies - e->timestamp;
}
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state);
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie);
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known, bool *ssp);
+void hci_inquiry_cache_flush(struct hci_dev *hdev);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
+ HCI_CONN_REAUTH_PEND,
HCI_CONN_ENCRYPT_PEND,
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
HCI_CONN_SCO_SETUP_PEND,
+ HCI_CONN_LE_SMP_PEND,
+ HCI_CONN_MGMT_CONNECTED,
+ HCI_CONN_SSP_ENABLED,
+ HCI_CONN_SC_ENABLED,
+ HCI_CONN_AES_CCM,
+ HCI_CONN_POWER_SAVE,
+ HCI_CONN_REMOTE_OOB,
+ HCI_CONN_6LOWPAN,
};
-static inline void hci_conn_hash_init(struct hci_dev *hdev)
+static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
- struct hci_conn_hash *h = &hdev->conn_hash;
- INIT_LIST_HEAD(&h->list);
- spin_lock_init(&h->lock);
- h->acl_num = 0;
- h->sco_num = 0;
+ struct hci_dev *hdev = conn->hdev;
+ return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+}
+
+static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_add(&c->list, &h->list);
- if (c->type == ACL_LINK)
+ list_add_rcu(&c->list, &h->list);
+ switch (c->type) {
+ case ACL_LINK:
h->acl_num++;
- else
+ break;
+ case AMP_LINK:
+ h->amp_num++;
+ break;
+ case LE_LINK:
+ h->le_num++;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
h->sco_num++;
+ break;
+ }
}
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_del(&c->list);
- if (c->type == ACL_LINK)
+
+ list_del_rcu(&c->list);
+ synchronize_rcu();
+
+ switch (c->type) {
+ case ACL_LINK:
h->acl_num--;
- else
+ break;
+ case AMP_LINK:
+ h->amp_num--;
+ break;
+ case LE_LINK:
+ h->le_num--;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
h->sco_num--;
+ break;
+ }
+}
+
+static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ switch (type) {
+ case ACL_LINK:
+ return h->acl_num;
+ case AMP_LINK:
+ return h->amp_num;
+ case LE_LINK:
+ return h->le_num;
+ case SCO_LINK:
+ case ESCO_LINK:
+ return h->sco_num;
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *c = &hdev->conn_hash;
+
+ return c->acl_num + c->amp_num + c->sco_num + c->le_num;
}
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
- __u16 handle)
+ __u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->handle == handle)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ rcu_read_unlock();
return c;
+ }
}
+ rcu_read_unlock();
+
return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
- __u8 type, bdaddr_t *ba)
+ __u8 type, bdaddr_t *ba)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == type && !bacmp(&c->dst, ba))
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
return c;
+ }
}
+
+ rcu_read_unlock();
+
return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
- __u8 type, __u16 state)
+ __u8 type, __u16 state)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == type && c->state == state)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state) {
+ rcu_read_unlock();
return c;
+ }
}
+
+ rcu_read_unlock();
+
return NULL;
}
-void hci_acl_connect(struct hci_conn *conn);
-void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
-void hci_add_sco(struct hci_conn *conn, __u16 handle);
-void hci_setup_sync(struct hci_conn *conn, __u16 handle);
+void hci_disconnect(struct hci_conn *conn, __u8 reason);
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
@@ -344,80 +675,142 @@ int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+void hci_chan_del(struct hci_chan *chan);
+void hci_chan_list_flush(struct hci_conn *conn);
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
+
+struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level, u8 auth_type);
+struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type);
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
-void hci_conn_enter_active_mode(struct hci_conn *conn);
-void hci_conn_enter_sniff_mode(struct hci_conn *conn);
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_conn_hold_device(struct hci_conn *conn);
-void hci_conn_put_device(struct hci_conn *conn);
+void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+
+/*
+ * hci_conn_get() and hci_conn_put() are used to control the life-time of an
+ * "hci_conn" object. They do not guarantee that the hci_conn object is running,
+ * working or anything else. They just guarantee that the object is available
+ * and can be dereferenced. So you can use its locks, local variables and any
+ * other constant data.
+ * Before accessing runtime data, you _must_ lock the object and then check that
+ * it is still running. As soon as you release the locks, the connection might
+ * get dropped, though.
+ *
+ * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
+ * how long the underlying connection is held. So every channel that runs on the
+ * hci_conn object calls this to prevent the connection from disappearing. As
+ * long as you hold a device, you must also guarantee that you have a valid
+ * reference to the device via hci_conn_get() (or the initial reference from
+ * hci_conn_add()).
+ * The hold()/drop() ref-count is known to drop below 0 sometimes, which is
+ * harmless because nothing depends on it staying non-negative. It does mean,
+ * however, that we cannot use _get()/_drop() inside them; the caller is
+ * required to already hold a valid reference (FIXME).
+ */
+
+static inline void hci_conn_get(struct hci_conn *conn)
+{
+ get_device(&conn->dev);
+}
+
+static inline void hci_conn_put(struct hci_conn *conn)
+{
+ put_device(&conn->dev);
+}
static inline void hci_conn_hold(struct hci_conn *conn)
{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
atomic_inc(&conn->refcnt);
- del_timer(&conn->disc_timer);
+ cancel_delayed_work(&conn->disc_work);
}
-static inline void hci_conn_put(struct hci_conn *conn)
+static inline void hci_conn_drop(struct hci_conn *conn)
{
+ BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
+
if (atomic_dec_and_test(&conn->refcnt)) {
unsigned long timeo;
- if (conn->type == ACL_LINK) {
- del_timer(&conn->idle_timer);
+
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ cancel_delayed_work(&conn->idle_work);
if (conn->state == BT_CONNECTED) {
- timeo = msecs_to_jiffies(conn->disc_timeout);
+ timeo = conn->disc_timeout;
if (!conn->out)
timeo *= 2;
- } else
+ } else {
timeo = msecs_to_jiffies(10);
- } else
+ }
+ break;
+
+ case AMP_LINK:
+ timeo = conn->disc_timeout;
+ break;
+
+ default:
timeo = msecs_to_jiffies(10);
- mod_timer(&conn->disc_timer, jiffies + timeo);
+ break;
+ }
+
+ cancel_delayed_work(&conn->disc_work);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->disc_work, timeo);
}
}
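As a concrete illustration of the get()/put() versus hold()/drop() split described in the comment above, a minimal usage sketch might look like the following. This is not part of the header; the helper name example_use_conn() is hypothetical and only uses the functions declared above.

static void example_use_conn(struct hci_conn *conn)
{
	/* Pin the object so it stays dereferenceable ... */
	hci_conn_get(conn);
	/* ... and pin the underlying link so it is not torn down */
	hci_conn_hold(conn);

	/* run a channel on top of the connection here */

	/* Releasing the link reference may re-arm the disconnect work */
	hci_conn_drop(conn);
	/* Releasing the object reference may free the hci_conn */
	hci_conn_put(conn);
}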
/* ----- HCI Devices ----- */
-static inline void __hci_dev_put(struct hci_dev *d)
-{
- if (atomic_dec_and_test(&d->refcnt))
- d->destruct(d);
-}
-
static inline void hci_dev_put(struct hci_dev *d)
{
- __hci_dev_put(d);
- module_put(d->owner);
+ BT_DBG("%s orig refcnt %d", d->name,
+ atomic_read(&d->dev.kobj.kref.refcount));
+
+ put_device(&d->dev);
}
-static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
+static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
- atomic_inc(&d->refcnt);
+ BT_DBG("%s orig refcnt %d", d->name,
+ atomic_read(&d->dev.kobj.kref.refcount));
+
+ get_device(&d->dev);
return d;
}
-static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
+#define hci_dev_lock(d) mutex_lock(&d->lock)
+#define hci_dev_unlock(d) mutex_unlock(&d->lock)
+
+#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
+#define to_hci_conn(c) container_of(c, struct hci_conn, dev)
+
+static inline void *hci_get_drvdata(struct hci_dev *hdev)
{
- if (try_module_get(d->owner))
- return __hci_dev_hold(d);
- return NULL;
+ return dev_get_drvdata(&hdev->dev);
}
-#define hci_dev_lock(d) spin_lock(&d->lock)
-#define hci_dev_unlock(d) spin_unlock(&d->lock)
-#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
-#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
+static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
+{
+ dev_set_drvdata(&hdev->dev, data);
+}
struct hci_dev *hci_dev_get(int index);
-struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
-int hci_unregister_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
@@ -432,233 +825,476 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_blacklist_clear(struct hci_dev *hdev);
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type);
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
+struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type);
+void hci_white_list_clear(struct hci_dev *hdev);
+int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
+struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type);
+int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
+ u8 auto_connect, u16 conn_min_interval,
+ u16 conn_max_interval);
+void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_conn_params_clear(struct hci_dev *hdev);
+
+struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type);
+void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_pend_le_conns_clear(struct hci_dev *hdev);
+
+void hci_update_background_scan(struct hci_dev *hdev);
+
+void hci_uuids_clear(struct hci_dev *hdev);
+
+void hci_link_keys_clear(struct hci_dev *hdev);
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
+ bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
+ bool master);
+struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 type, u8 authenticated,
+ u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, bool master);
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
+void hci_smp_ltks_clear(struct hci_dev *hdev);
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+
+struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
+struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
+struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 val[16], bdaddr_t *rpa);
+void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
+void hci_smp_irks_clear(struct hci_dev *hdev);
+
+void hci_remote_oob_data_clear(struct hci_dev *hdev);
+struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *hash, u8 *randomizer);
+int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *hash192, u8 *randomizer192,
+ u8 *hash256, u8 *randomizer256);
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_frame(struct sk_buff *skb);
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
-int hci_register_sysfs(struct hci_dev *hdev);
-void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
-#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
/* ----- LMP capabilities ----- */
-#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
-#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
-#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
-#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
-#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
-#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
+#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
+#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
+#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
+#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
+#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
+#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
+#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
+#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
+#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
+#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
+#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
+#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
+#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
+#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
+#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
+
+/* ----- Extended LMP capabilities ----- */
+#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
+#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE)
+#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
+#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
+#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
+#define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING)
+
+/* ----- Host capabilities ----- */
+#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
+#define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC)
+#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
+#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
/* ----- HCI protocols ----- */
-struct hci_proto {
- char *name;
- unsigned int id;
- unsigned long flags;
-
- void *priv;
+#define HCI_PROTO_DEFER 0x01
- int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
- int (*connect_cfm) (struct hci_conn *conn, __u8 status);
- int (*disconn_ind) (struct hci_conn *conn);
- int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
- int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
- int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
- int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
-};
-
-static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 type, __u8 *flags)
{
- register struct hci_proto *hp;
- int mask = 0;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->connect_ind)
- mask |= hp->connect_ind(hdev, bdaddr, type);
+ switch (type) {
+ case ACL_LINK:
+ return l2cap_connect_ind(hdev, bdaddr);
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->connect_ind)
- mask |= hp->connect_ind(hdev, bdaddr, type);
+ case SCO_LINK:
+ case ESCO_LINK:
+ return sco_connect_ind(hdev, bdaddr, flags);
- return mask;
+ default:
+ BT_ERR("unknown link type %d", type);
+ return -EINVAL;
+ }
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
- register struct hci_proto *hp;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->connect_cfm)
- hp->connect_cfm(conn, status);
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ l2cap_connect_cfm(conn, status);
+ break;
+
+ case SCO_LINK:
+ case ESCO_LINK:
+ sco_connect_cfm(conn, status);
+ break;
+
+ default:
+ BT_ERR("unknown link type %d", conn->type);
+ break;
+ }
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->connect_cfm)
- hp->connect_cfm(conn, status);
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
}
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
- register struct hci_proto *hp;
- int reason = 0x13;
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return HCI_ERROR_REMOTE_USER_TERM;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->disconn_ind)
- reason = hp->disconn_ind(conn);
-
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->disconn_ind)
- reason = hp->disconn_ind(conn);
-
- return reason;
+ return l2cap_disconn_ind(conn);
}
static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- register struct hci_proto *hp;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->disconn_cfm)
- hp->disconn_cfm(conn, reason);
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ l2cap_disconn_cfm(conn, reason);
+ break;
+
+ case SCO_LINK:
+ case ESCO_LINK:
+ sco_disconn_cfm(conn, reason);
+ break;
+
+	/* L2CAP disconnection is handled via the BR/EDR channel */
+ case AMP_LINK:
+ break;
+
+ default:
+ BT_ERR("unknown link type %d", conn->type);
+ break;
+ }
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->disconn_cfm)
- hp->disconn_cfm(conn, reason);
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
- register struct hci_proto *hp;
__u8 encrypt;
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
return;
- encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+ return;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
+ l2cap_security_cfm(conn, status, encrypt);
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
+ __u8 encrypt)
{
- register struct hci_proto *hp;
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ l2cap_security_cfm(conn, status, encrypt);
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
-int hci_register_proto(struct hci_proto *hproto);
-int hci_unregister_proto(struct hci_proto *hproto);
-
/* ----- HCI callbacks ----- */
struct hci_cb {
struct list_head list;
char *name;
- void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
- struct list_head *p;
+ struct hci_cb *cb;
__u8 encrypt;
hci_proto_auth_cfm(conn, status);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
-static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
+ __u8 encrypt)
{
- struct list_head *p;
+ struct hci_cb *cb;
if (conn->sec_level == BT_SECURITY_SDP)
conn->sec_level = BT_SECURITY_LOW;
+ if (conn->pending_sec_level > conn->sec_level)
+ conn->sec_level = conn->pending_sec_level;
+
hci_proto_encrypt_cfm(conn, status, encrypt);
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
- struct list_head *p;
+ struct hci_cb *cb;
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
-static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
+static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ __u8 role)
{
- struct list_head *p;
+ struct hci_cb *cb;
- read_lock_bh(&hci_cb_list_lock);
- list_for_each(p, &hci_cb_list) {
- struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+ read_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
+}
+
+static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
+{
+ size_t parsed = 0;
+
+ if (data_len < 2)
+ return false;
+
+ while (parsed < data_len - 1) {
+ u8 field_len = data[0];
+
+ if (field_len == 0)
+ break;
+
+ parsed += field_len + 1;
+
+ if (parsed > data_len)
+ break;
+
+ if (data[1] == type)
+ return true;
+
+ data += field_len + 1;
+ }
+
+ return false;
+}
+
+static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+{
+ if (addr_type != 0x01)
+ return false;
+
+ if ((bdaddr->b[5] & 0xc0) == 0x40)
+ return true;
+
+ return false;
+}
+
+static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 addr_type)
+{
+ if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
+ return NULL;
+
+ return hci_find_irk_by_rpa(hdev, bdaddr);
}
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-int hci_register_notifier(struct notifier_block *nb);
-int hci_unregister_notifier(struct notifier_block *nb);
+struct hci_request {
+ struct hci_dev *hdev;
+ struct sk_buff_head cmd_q;
+
+ /* If something goes wrong when building the HCI request, the error
+ * value is stored in this field.
+ */
+ int err;
+};
-int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param);
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+
+void hci_req_add_le_scan_disable(struct hci_request *req);
+void hci_req_add_le_passive_scan(struct hci_request *req);
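A minimal sketch of how the request API above is meant to be used: build a request, queue commands, then submit it. The helper below is hypothetical (not taken from the kernel tree), error handling is elided, and it assumes hci_req_complete_t is the completion callback type referenced by hci_req_run() above.

static int example_disable_scans(struct hci_dev *hdev,
				 hci_req_complete_t complete)
{
	struct hci_request req;
	__u8 scan = SCAN_DISABLED;

	hci_req_init(&req, hdev);
	/* Queue a classic Write Scan Enable command ... */
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	/* ... and an LE scan disable, built by the helper above */
	hci_req_add_le_scan_disable(&req);

	/* Submit both commands as one request; completion is asynchronous */
	return hci_req_run(&req, complete);
}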
+
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
+
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
+ const void *param);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
-void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
-
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event);
+
+/* Management interface */
+#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
+#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
+ BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+
+/* These LE scan and inquiry parameters were chosen according to the LE
+ * General Discovery Procedure specification.
+ */
+#define DISCOV_LE_SCAN_WIN 0x12
+#define DISCOV_LE_SCAN_INT 0x12
+#define DISCOV_LE_TIMEOUT 10240 /* msec */
+#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
+#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
+#define DISCOV_BREDR_INQUIRY_LEN 0x08
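
For a sense of scale: LE scan window and interval are expressed in the controller's 0.625 ms units, so the two 0x12 values above both work out to 18 * 0.625 ms = 11.25 ms, i.e. an effectively 100% duty-cycle scan, bounded by DISCOV_LE_TIMEOUT (10.24 s) or DISCOV_INTERLEAVED_TIMEOUT (5.12 s) for the discovery session.
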
+
+int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
+void mgmt_index_added(struct hci_dev *hdev);
+void mgmt_index_removed(struct hci_dev *hdev);
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+int mgmt_powered(struct hci_dev *hdev, u8 powered);
+void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+void mgmt_advertising(struct hci_dev *hdev, u8 advertising);
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent);
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class);
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason,
+ bool mgmt_connected);
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 value,
+ u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 passkey,
+ u8 entered);
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status);
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
+ u8 *randomizer192, u8 *hash256,
+ u8 *randomizer256, u8 status);
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+ u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
+ u8 scan_rsp_len);
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
+void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ bool persistent);
+void mgmt_reenable_advertising(struct hci_dev *hdev);
+void mgmt_smp_complete(struct hci_conn *conn, bool complete);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -668,6 +1304,7 @@ struct hci_pinfo {
struct hci_dev *hdev;
struct hci_filter filter;
__u32 cmsg_mask;
+ unsigned short channel;
};
/* HCI security filter */
@@ -687,6 +1324,18 @@ struct hci_sec_filter {
#define hci_req_lock(d) mutex_lock(&d->req_lock)
#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
-void hci_req_complete(struct hci_dev *hdev, int result);
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+ u16 latency, u16 to_multiplier);
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
+ __u8 ltk[16]);
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+ u8 *own_addr_type);
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *bdaddr_type);
+
+#define SCO_AIRMODE_MASK 0x0003
+#define SCO_AIRMODE_CVSD 0x0000
+#define SCO_AIRMODE_TRANSP 0x0003
#endif /* __HCI_CORE_H */
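
As a rough usage sketch of the request API introduced above (not part of this patch): commands are queued on a stack-allocated struct hci_request and then submitted as one unit. HCI_OP_READ_LOCAL_NAME is assumed from hci.h, and the NULL completion callback is only meant to show the call shape; a real caller would usually pass an hci_req_complete_t handler.

/* Minimal sketch, assuming <net/bluetooth/hci_core.h> is included. */
static int example_read_local_name(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);

	/* Queue one command with no parameters. */
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Submit the queued commands; a negative value is returned if
	 * nothing was queued or building the request failed (req.err). */
	return hci_req_run(&req, NULL);
}
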
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
new file mode 100644
index 00000000000..77d1e576418
--- /dev/null
+++ b/include/net/bluetooth/hci_mon.h
@@ -0,0 +1,51 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_MON_H
+#define __HCI_MON_H
+
+struct hci_mon_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+#define HCI_MON_HDR_SIZE 6
+
+#define HCI_MON_NEW_INDEX 0
+#define HCI_MON_DEL_INDEX 1
+#define HCI_MON_COMMAND_PKT 2
+#define HCI_MON_EVENT_PKT 3
+#define HCI_MON_ACL_TX_PKT 4
+#define HCI_MON_ACL_RX_PKT 5
+#define HCI_MON_SCO_TX_PKT 6
+#define HCI_MON_SCO_RX_PKT 7
+
+struct hci_mon_new_index {
+ __u8 type;
+ __u8 bus;
+ bdaddr_t bdaddr;
+ char name[8];
+} __packed;
+#define HCI_MON_NEW_INDEX_SIZE 16
+
+#endif /* __HCI_MON_H */
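
For reference, a user-space tracer reading the monitor channel would see each packet prefixed by the 6-byte header above, with all fields little-endian. A small decoding sketch (field layout mirrors struct hci_mon_hdr; the surrounding socket handling is omitted):

#include <stddef.h>
#include <stdint.h>

struct mon_hdr {
	uint16_t opcode;	/* HCI_MON_* constant */
	uint16_t index;		/* controller index   */
	uint16_t len;		/* payload length     */
};

static int parse_mon_hdr(const uint8_t *buf, size_t n, struct mon_hdr *out)
{
	if (n < 6)			/* HCI_MON_HDR_SIZE */
		return -1;

	out->opcode = (uint16_t)(buf[0] | (buf[1] << 8));
	out->index  = (uint16_t)(buf[2] | (buf[3] << 8));
	out->len    = (uint16_t)(buf[4] | (buf[5] << 8));
	return 0;
}
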
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index c819c8bf9b6..4abdcb220e3 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -1,4 +1,4 @@
-/*
+/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
@@ -14,33 +14,48 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#ifndef __L2CAP_H
#define __L2CAP_H
+#include <asm/unaligned.h>
+
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
#define L2CAP_DEFAULT_MIN_MTU 48
-#define L2CAP_DEFAULT_FLUSH_TO 0xffff
+#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
+#define L2CAP_EFS_DEFAULT_FLUSH_TO 0xFFFFFFFF
#define L2CAP_DEFAULT_TX_WINDOW 63
+#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
#define L2CAP_DEFAULT_MAX_TX 3
#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
-#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
+#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */
#define L2CAP_DEFAULT_ACK_TO 200
-#define L2CAP_LOCAL_BUSY_TRIES 12
-
-#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
-#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
+#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
+#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
+#define L2CAP_LE_MIN_MTU 23
+
+#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
+#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_ENC_TIMEOUT msecs_to_jiffies(5000)
+#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
+#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
+
+#define L2CAP_A2MP_DEFAULT_MTU 670
/* L2CAP socket address */
struct sockaddr_l2 {
@@ -48,6 +63,7 @@ struct sockaddr_l2 {
__le16 l2_psm;
bdaddr_t l2_bdaddr;
__le16 l2_cid;
+ __u8 l2_bdaddr_type;
};
/* L2CAP socket options */
@@ -75,6 +91,7 @@ struct l2cap_conninfo {
#define L2CAP_LM_TRUSTED 0x0008
#define L2CAP_LM_RELIABLE 0x0010
#define L2CAP_LM_SECURE 0x0020
+#define L2CAP_LM_FIPS 0x0040
/* L2CAP command codes */
#define L2CAP_COMMAND_REJ 0x01
@@ -88,44 +105,91 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
-
-/* L2CAP feature mask */
+#define L2CAP_CREATE_CHAN_REQ 0x0c
+#define L2CAP_CREATE_CHAN_RSP 0x0d
+#define L2CAP_MOVE_CHAN_REQ 0x0e
+#define L2CAP_MOVE_CHAN_RSP 0x0f
+#define L2CAP_MOVE_CHAN_CFM 0x10
+#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
+#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
+#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
+#define L2CAP_LE_CONN_REQ 0x14
+#define L2CAP_LE_CONN_RSP 0x15
+#define L2CAP_LE_CREDITS 0x16
+
+/* L2CAP extended feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
#define L2CAP_FEAT_RETRANS 0x00000002
+#define L2CAP_FEAT_BIDIR_QOS 0x00000004
#define L2CAP_FEAT_ERTM 0x00000008
#define L2CAP_FEAT_STREAMING 0x00000010
#define L2CAP_FEAT_FCS 0x00000020
+#define L2CAP_FEAT_EXT_FLOW 0x00000040
#define L2CAP_FEAT_FIXED_CHAN 0x00000080
+#define L2CAP_FEAT_EXT_WINDOW 0x00000100
+#define L2CAP_FEAT_UCD 0x00000200
/* L2CAP checksum option */
#define L2CAP_FCS_NONE 0x00
#define L2CAP_FCS_CRC16 0x01
+/* L2CAP fixed channels */
+#define L2CAP_FC_L2CAP 0x02
+#define L2CAP_FC_CONNLESS 0x04
+#define L2CAP_FC_A2MP 0x08
+#define L2CAP_FC_6LOWPAN 0x3e /* reserved and temporary value */
+
/* L2CAP Control Field bit masks */
-#define L2CAP_CTRL_SAR 0xC000
-#define L2CAP_CTRL_REQSEQ 0x3F00
-#define L2CAP_CTRL_TXSEQ 0x007E
-#define L2CAP_CTRL_RETRANS 0x0080
-#define L2CAP_CTRL_FINAL 0x0080
-#define L2CAP_CTRL_POLL 0x0010
-#define L2CAP_CTRL_SUPERVISE 0x000C
-#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
-
-#define L2CAP_CTRL_TXSEQ_SHIFT 1
-#define L2CAP_CTRL_REQSEQ_SHIFT 8
-#define L2CAP_CTRL_SAR_SHIFT 14
+#define L2CAP_CTRL_SAR 0xC000
+#define L2CAP_CTRL_REQSEQ 0x3F00
+#define L2CAP_CTRL_TXSEQ 0x007E
+#define L2CAP_CTRL_SUPERVISE 0x000C
+
+#define L2CAP_CTRL_RETRANS 0x0080
+#define L2CAP_CTRL_FINAL 0x0080
+#define L2CAP_CTRL_POLL 0x0010
+#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
+
+#define L2CAP_CTRL_TXSEQ_SHIFT 1
+#define L2CAP_CTRL_SUPER_SHIFT 2
+#define L2CAP_CTRL_POLL_SHIFT 4
+#define L2CAP_CTRL_FINAL_SHIFT 7
+#define L2CAP_CTRL_REQSEQ_SHIFT 8
+#define L2CAP_CTRL_SAR_SHIFT 14
+
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR 0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
+
+#define L2CAP_EXT_CTRL_POLL 0x00040000
+#define L2CAP_EXT_CTRL_FINAL 0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
+#define L2CAP_EXT_CTRL_SAR_SHIFT 16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
+#define L2CAP_EXT_CTRL_POLL_SHIFT 18
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
/* L2CAP Supervisory Function */
-#define L2CAP_SUPER_RCV_READY 0x0000
-#define L2CAP_SUPER_REJECT 0x0004
-#define L2CAP_SUPER_RCV_NOT_READY 0x0008
-#define L2CAP_SUPER_SELECT_REJECT 0x000C
+#define L2CAP_SUPER_RR 0x00
+#define L2CAP_SUPER_REJ 0x01
+#define L2CAP_SUPER_RNR 0x02
+#define L2CAP_SUPER_SREJ 0x03
/* L2CAP Segmentation and Reassembly */
-#define L2CAP_SDU_UNSEGMENTED 0x0000
-#define L2CAP_SDU_START 0x4000
-#define L2CAP_SDU_END 0x8000
-#define L2CAP_SDU_CONTINUE 0xC000
+#define L2CAP_SAR_UNSEGMENTED 0x00
+#define L2CAP_SAR_START 0x01
+#define L2CAP_SAR_END 0x02
+#define L2CAP_SAR_CONTINUE 0x03
+
+/* L2CAP Command rej. reasons */
+#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
+#define L2CAP_REJ_MTU_EXCEEDED 0x0001
+#define L2CAP_REJ_INVALID_CID 0x0002
/* L2CAP structures */
struct l2cap_hdr {
@@ -133,6 +197,14 @@ struct l2cap_hdr {
__le16 cid;
} __packed;
#define L2CAP_HDR_SIZE 4
+#define L2CAP_ENH_HDR_SIZE 6
+#define L2CAP_EXT_HDR_SIZE 8
+
+#define L2CAP_FCS_SIZE 2
+#define L2CAP_SDULEN_SIZE 2
+#define L2CAP_PSMLEN_SIZE 2
+#define L2CAP_ENH_CTRL_SIZE 2
+#define L2CAP_EXT_CTRL_SIZE 4
struct l2cap_cmd_hdr {
__u8 code;
@@ -141,10 +213,21 @@ struct l2cap_cmd_hdr {
} __packed;
#define L2CAP_CMD_HDR_SIZE 4
-struct l2cap_cmd_rej {
+struct l2cap_cmd_rej_unk {
__le16 reason;
} __packed;
+struct l2cap_cmd_rej_mtu {
+ __le16 reason;
+ __le16 max_mtu;
+} __packed;
+
+struct l2cap_cmd_rej_cid {
+ __le16 reason;
+ __le16 scid;
+ __le16 dcid;
+} __packed;
+
struct l2cap_conn_req {
__le16 psm;
__le16 scid;
@@ -157,20 +240,35 @@ struct l2cap_conn_rsp {
__le16 status;
} __packed;
-/* channel indentifier */
+/* protocol/service multiplexer (PSM) */
+#define L2CAP_PSM_SDP 0x0001
+#define L2CAP_PSM_RFCOMM 0x0003
+#define L2CAP_PSM_3DSP 0x0021
+
+/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
+#define L2CAP_CID_A2MP 0x0003
+#define L2CAP_CID_ATT 0x0004
+#define L2CAP_CID_LE_SIGNALING 0x0005
+#define L2CAP_CID_SMP 0x0006
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
+#define L2CAP_CID_LE_DYN_END 0x007f
-/* connect result */
+/* connect/create channel results */
#define L2CAP_CR_SUCCESS 0x0000
#define L2CAP_CR_PEND 0x0001
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
+#define L2CAP_CR_BAD_AMP 0x0005
+#define L2CAP_CR_AUTHENTICATION 0x0005
+#define L2CAP_CR_AUTHORIZATION 0x0006
+#define L2CAP_CR_BAD_KEY_SIZE 0x0007
+#define L2CAP_CR_ENCRYPTION 0x0008
-/* connect status */
+/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
#define L2CAP_CS_AUTHEN_PEND 0x0001
#define L2CAP_CS_AUTHOR_PEND 0x0002
@@ -192,6 +290,11 @@ struct l2cap_conf_rsp {
#define L2CAP_CONF_UNACCEPT 0x0001
#define L2CAP_CONF_REJECT 0x0002
#define L2CAP_CONF_UNKNOWN 0x0003
+#define L2CAP_CONF_PENDING 0x0004
+#define L2CAP_CONF_EFS_REJECT 0x0005
+
+/* configuration req/rsp continuation flag */
+#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
struct l2cap_conf_opt {
__u8 type;
@@ -208,6 +311,8 @@ struct l2cap_conf_opt {
#define L2CAP_CONF_QOS 0x03
#define L2CAP_CONF_RFC 0x04
#define L2CAP_CONF_FCS 0x05
+#define L2CAP_CONF_EFS 0x06
+#define L2CAP_CONF_EWS 0x07
#define L2CAP_CONF_MAX_SIZE 22
@@ -226,6 +331,27 @@ struct l2cap_conf_rfc {
#define L2CAP_MODE_ERTM 0x03
#define L2CAP_MODE_STREAMING 0x04
+/* Unlike the above this one doesn't actually map to anything that would
+ * ever be sent over the air. Therefore, use a value that's unlikely to
+ * ever be used in the BR/EDR configuration phase.
+ */
+#define L2CAP_MODE_LE_FLOWCTL 0x80
+
+struct l2cap_conf_efs {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC 0x00
+#define L2CAP_SERV_BESTEFFORT 0x01
+#define L2CAP_SERV_GUARANTEED 0x02
+
+#define L2CAP_BESTEFFORT_ID 0x01
+
struct l2cap_disconn_req {
__le16 dcid;
__le16 scid;
@@ -246,73 +372,122 @@ struct l2cap_info_rsp {
__u8 data[0];
} __packed;
-/* info type */
-#define L2CAP_IT_CL_MTU 0x0001
-#define L2CAP_IT_FEAT_MASK 0x0002
-#define L2CAP_IT_FIXED_CHAN 0x0003
+struct l2cap_create_chan_req {
+ __le16 psm;
+ __le16 scid;
+ __u8 amp_id;
+} __packed;
-/* info result */
-#define L2CAP_IR_SUCCESS 0x0000
-#define L2CAP_IR_NOTSUPP 0x0001
-
-/* ----- L2CAP connections ----- */
-struct l2cap_chan_list {
- struct sock *head;
- rwlock_t lock;
- long num;
-};
+struct l2cap_create_chan_rsp {
+ __le16 dcid;
+ __le16 scid;
+ __le16 result;
+ __le16 status;
+} __packed;
-struct l2cap_conn {
- struct hci_conn *hcon;
+struct l2cap_move_chan_req {
+ __le16 icid;
+ __u8 dest_amp_id;
+} __packed;
- bdaddr_t *dst;
- bdaddr_t *src;
+struct l2cap_move_chan_rsp {
+ __le16 icid;
+ __le16 result;
+} __packed;
- unsigned int mtu;
+#define L2CAP_MR_SUCCESS 0x0000
+#define L2CAP_MR_PEND 0x0001
+#define L2CAP_MR_BAD_ID 0x0002
+#define L2CAP_MR_SAME_ID 0x0003
+#define L2CAP_MR_NOT_SUPP 0x0004
+#define L2CAP_MR_COLLISION 0x0005
+#define L2CAP_MR_NOT_ALLOWED 0x0006
- __u32 feat_mask;
+struct l2cap_move_chan_cfm {
+ __le16 icid;
+ __le16 result;
+} __packed;
- __u8 info_state;
- __u8 info_ident;
+#define L2CAP_MC_CONFIRMED 0x0000
+#define L2CAP_MC_UNCONFIRMED 0x0001
- struct timer_list info_timer;
+struct l2cap_move_chan_cfm_rsp {
+ __le16 icid;
+} __packed;
- spinlock_t lock;
+/* info type */
+#define L2CAP_IT_CL_MTU 0x0001
+#define L2CAP_IT_FEAT_MASK 0x0002
+#define L2CAP_IT_FIXED_CHAN 0x0003
- struct sk_buff *rx_skb;
- __u32 rx_len;
- __u8 rx_ident;
- __u8 tx_ident;
+/* info result */
+#define L2CAP_IR_SUCCESS 0x0000
+#define L2CAP_IR_NOTSUPP 0x0001
+
+struct l2cap_conn_param_update_req {
+ __le16 min;
+ __le16 max;
+ __le16 latency;
+ __le16 to_multiplier;
+} __packed;
- __u8 disc_reason;
+struct l2cap_conn_param_update_rsp {
+ __le16 result;
+} __packed;
- struct l2cap_chan_list chan_list;
-};
+/* Connection Parameters result */
+#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
+#define L2CAP_CONN_PARAM_REJECTED 0x0001
-struct sock_del_list {
- struct sock *sk;
- struct list_head list;
-};
+#define L2CAP_LE_MAX_CREDITS 10
+#define L2CAP_LE_DEFAULT_MPS 230
-#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
-#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
-#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
+struct l2cap_le_conn_req {
+ __le16 psm;
+ __le16 scid;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+} __packed;
-/* ----- L2CAP channel and socket info ----- */
-#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
-#define TX_QUEUE(sk) (&l2cap_pi(sk)->tx_queue)
-#define SREJ_QUEUE(sk) (&l2cap_pi(sk)->srej_queue)
-#define BUSY_QUEUE(sk) (&l2cap_pi(sk)->busy_queue)
-#define SREJ_LIST(sk) (&l2cap_pi(sk)->srej_l.list)
+struct l2cap_le_conn_rsp {
+ __le16 dcid;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+} __packed;
-struct srej_list {
- __u8 tx_seq;
- struct list_head list;
+struct l2cap_le_credits {
+ __le16 cid;
+ __le16 credits;
+} __packed;
+
+/* ----- L2CAP channels and connections ----- */
+struct l2cap_seq_list {
+ __u16 head;
+ __u16 tail;
+ __u16 mask;
+ __u16 *list;
};
-struct l2cap_pinfo {
- struct bt_sock bt;
+#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
+#define L2CAP_SEQ_LIST_TAIL 0x8000
+
+struct l2cap_chan {
+ struct l2cap_conn *conn;
+ struct hci_conn *hs_hcon;
+ struct hci_chan *hs_hchan;
+ struct kref kref;
+
+ __u8 state;
+
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
__le16 psm;
+ __le16 sport;
__u16 dcid;
__u16 scid;
@@ -320,109 +495,403 @@ struct l2cap_pinfo {
__u16 omtu;
__u16 flush_to;
__u8 mode;
- __u8 num_conf_req;
- __u8 num_conf_rsp;
+ __u8 chan_type;
+ __u8 chan_policy;
- __u8 fcs;
__u8 sec_level;
- __u8 role_switch;
- __u8 force_reliable;
+
+ __u8 ident;
__u8 conf_req[64];
__u8 conf_len;
- __u8 conf_state;
- __u16 conn_state;
-
- __u8 next_tx_seq;
- __u8 expected_ack_seq;
- __u8 expected_tx_seq;
- __u8 buffer_seq;
- __u8 buffer_seq_srej;
- __u8 srej_save_reqseq;
- __u8 frames_sent;
- __u8 unacked_frames;
- __u8 retry_count;
- __u8 num_acked;
- __u16 sdu_len;
- __u16 partial_sdu_len;
- struct sk_buff *sdu;
+ __u8 num_conf_req;
+ __u8 num_conf_rsp;
- __u8 ident;
+ __u8 fcs;
- __u8 tx_win;
+ __u16 tx_win;
+ __u16 tx_win_max;
+ __u16 ack_win;
__u8 max_tx;
- __u8 remote_tx_win;
- __u8 remote_max_tx;
__u16 retrans_timeout;
__u16 monitor_timeout;
- __u16 remote_mps;
__u16 mps;
- __le16 sport;
+ __u16 tx_credits;
+ __u16 rx_credits;
+
+ __u8 tx_state;
+ __u8 rx_state;
+
+ unsigned long conf_state;
+ unsigned long conn_state;
+ unsigned long flags;
+
+ __u8 remote_amp_id;
+ __u8 local_amp_id;
+ __u8 move_id;
+ __u8 move_state;
+ __u8 move_role;
+
+ __u16 next_tx_seq;
+ __u16 expected_ack_seq;
+ __u16 expected_tx_seq;
+ __u16 buffer_seq;
+ __u16 srej_save_reqseq;
+ __u16 last_acked_seq;
+ __u16 frames_sent;
+ __u16 unacked_frames;
+ __u8 retry_count;
+ __u16 sdu_len;
+ struct sk_buff *sdu;
+ struct sk_buff *sdu_last_frag;
- struct timer_list retrans_timer;
- struct timer_list monitor_timer;
- struct timer_list ack_timer;
- struct sk_buff_head tx_queue;
- struct sk_buff_head srej_queue;
- struct sk_buff_head busy_queue;
- struct work_struct busy_work;
- struct srej_list srej_l;
- struct l2cap_conn *conn;
- struct sock *next_c;
- struct sock *prev_c;
+ __u16 remote_tx_win;
+ __u8 remote_max_tx;
+ __u16 remote_mps;
+
+ __u8 local_id;
+ __u8 local_stype;
+ __u16 local_msdu;
+ __u32 local_sdu_itime;
+ __u32 local_acc_lat;
+ __u32 local_flush_to;
+
+ __u8 remote_id;
+ __u8 remote_stype;
+ __u16 remote_msdu;
+ __u32 remote_sdu_itime;
+ __u32 remote_acc_lat;
+ __u32 remote_flush_to;
+
+ struct delayed_work chan_timer;
+ struct delayed_work retrans_timer;
+ struct delayed_work monitor_timer;
+ struct delayed_work ack_timer;
+
+ struct sk_buff *tx_send_head;
+ struct sk_buff_head tx_q;
+ struct sk_buff_head srej_q;
+ struct l2cap_seq_list srej_list;
+ struct l2cap_seq_list retrans_list;
+
+ struct list_head list;
+ struct list_head global_l;
+
+ void *data;
+ struct l2cap_ops *ops;
+ struct mutex lock;
};
-#define L2CAP_CONF_REQ_SENT 0x01
-#define L2CAP_CONF_INPUT_DONE 0x02
-#define L2CAP_CONF_OUTPUT_DONE 0x04
-#define L2CAP_CONF_MTU_DONE 0x08
-#define L2CAP_CONF_MODE_DONE 0x10
-#define L2CAP_CONF_CONNECT_PEND 0x20
-#define L2CAP_CONF_NO_FCS_RECV 0x40
-#define L2CAP_CONF_STATE2_DEVICE 0x80
+struct l2cap_ops {
+ char *name;
+
+ struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
+ int (*recv) (struct l2cap_chan * chan,
+ struct sk_buff *skb);
+ void (*teardown) (struct l2cap_chan *chan, int err);
+ void (*close) (struct l2cap_chan *chan);
+ void (*state_change) (struct l2cap_chan *chan,
+ int state, int err);
+ void (*ready) (struct l2cap_chan *chan);
+ void (*defer) (struct l2cap_chan *chan);
+ void (*resume) (struct l2cap_chan *chan);
+ void (*suspend) (struct l2cap_chan *chan);
+ void (*set_shutdown) (struct l2cap_chan *chan);
+ long (*get_sndtimeo) (struct l2cap_chan *chan);
+ struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
+ unsigned long len, int nb);
+};
+
+struct l2cap_conn {
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
+
+ unsigned int mtu;
+
+ __u32 feat_mask;
+ __u8 fixed_chan_mask;
+ bool hs_enabled;
+
+ __u8 info_state;
+ __u8 info_ident;
+
+ struct delayed_work info_timer;
+
+ spinlock_t lock;
+
+ struct sk_buff *rx_skb;
+ __u32 rx_len;
+ __u8 tx_ident;
+
+ struct sk_buff_head pending_rx;
+ struct work_struct pending_rx_work;
+
+ __u8 disc_reason;
+
+ struct delayed_work security_timer;
+ struct smp_chan *smp_chan;
+
+ struct list_head chan_l;
+ struct mutex chan_lock;
+ struct kref ref;
+ struct list_head users;
+};
+
+struct l2cap_user {
+ struct list_head list;
+ int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
+ void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
+};
+
+#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
+#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
+#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
+
+#define L2CAP_CHAN_RAW 1
+#define L2CAP_CHAN_CONN_LESS 2
+#define L2CAP_CHAN_CONN_ORIENTED 3
+#define L2CAP_CHAN_FIXED 4
+
+/* ----- L2CAP socket info ----- */
+#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+
+struct l2cap_pinfo {
+ struct bt_sock bt;
+ struct l2cap_chan *chan;
+ struct sk_buff *rx_busy_skb;
+};
+
+enum {
+ CONF_REQ_SENT,
+ CONF_INPUT_DONE,
+ CONF_OUTPUT_DONE,
+ CONF_MTU_DONE,
+ CONF_MODE_DONE,
+ CONF_CONNECT_PEND,
+ CONF_RECV_NO_FCS,
+ CONF_STATE2_DEVICE,
+ CONF_EWS_RECV,
+ CONF_LOC_CONF_PEND,
+ CONF_REM_CONF_PEND,
+ CONF_NOT_COMPLETE,
+};
#define L2CAP_CONF_MAX_CONF_REQ 2
#define L2CAP_CONF_MAX_CONF_RSP 2
-#define L2CAP_CONN_SAR_SDU 0x0001
-#define L2CAP_CONN_SREJ_SENT 0x0002
-#define L2CAP_CONN_WAIT_F 0x0004
-#define L2CAP_CONN_SREJ_ACT 0x0008
-#define L2CAP_CONN_SEND_PBIT 0x0010
-#define L2CAP_CONN_REMOTE_BUSY 0x0020
-#define L2CAP_CONN_LOCAL_BUSY 0x0040
-#define L2CAP_CONN_REJ_ACT 0x0080
-#define L2CAP_CONN_SEND_FBIT 0x0100
-#define L2CAP_CONN_RNR_SENT 0x0200
-#define L2CAP_CONN_SAR_RETRY 0x0400
-
-#define __mod_retrans_timer() mod_timer(&l2cap_pi(sk)->retrans_timer, \
- jiffies + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
-#define __mod_monitor_timer() mod_timer(&l2cap_pi(sk)->monitor_timer, \
- jiffies + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
-#define __mod_ack_timer() mod_timer(&l2cap_pi(sk)->ack_timer, \
- jiffies + msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
-
-static inline int l2cap_tx_window_full(struct sock *sk)
+enum {
+ CONN_SREJ_SENT,
+ CONN_WAIT_F,
+ CONN_SREJ_ACT,
+ CONN_SEND_PBIT,
+ CONN_REMOTE_BUSY,
+ CONN_LOCAL_BUSY,
+ CONN_REJ_ACT,
+ CONN_SEND_FBIT,
+ CONN_RNR_SENT,
+};
+
+/* Definitions for flags in l2cap_chan */
+enum {
+ FLAG_ROLE_SWITCH,
+ FLAG_FORCE_ACTIVE,
+ FLAG_FORCE_RELIABLE,
+ FLAG_FLUSHABLE,
+ FLAG_EXT_CTRL,
+ FLAG_EFS_ENABLE,
+ FLAG_DEFER_SETUP,
+ FLAG_LE_CONN_REQ_SENT,
+};
+
+enum {
+ L2CAP_TX_STATE_XMIT,
+ L2CAP_TX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_RX_STATE_RECV,
+ L2CAP_RX_STATE_SREJ_SENT,
+ L2CAP_RX_STATE_MOVE,
+ L2CAP_RX_STATE_WAIT_P,
+ L2CAP_RX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_TXSEQ_EXPECTED,
+ L2CAP_TXSEQ_EXPECTED_SREJ,
+ L2CAP_TXSEQ_UNEXPECTED,
+ L2CAP_TXSEQ_UNEXPECTED_SREJ,
+ L2CAP_TXSEQ_DUPLICATE,
+ L2CAP_TXSEQ_DUPLICATE_SREJ,
+ L2CAP_TXSEQ_INVALID,
+ L2CAP_TXSEQ_INVALID_IGNORE,
+};
+
+enum {
+ L2CAP_EV_DATA_REQUEST,
+ L2CAP_EV_LOCAL_BUSY_DETECTED,
+ L2CAP_EV_LOCAL_BUSY_CLEAR,
+ L2CAP_EV_RECV_REQSEQ_AND_FBIT,
+ L2CAP_EV_RECV_FBIT,
+ L2CAP_EV_RETRANS_TO,
+ L2CAP_EV_MONITOR_TO,
+ L2CAP_EV_EXPLICIT_POLL,
+ L2CAP_EV_RECV_IFRAME,
+ L2CAP_EV_RECV_RR,
+ L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR,
+ L2CAP_EV_RECV_SREJ,
+ L2CAP_EV_RECV_FRAME,
+};
+
+enum {
+ L2CAP_MOVE_ROLE_NONE,
+ L2CAP_MOVE_ROLE_INITIATOR,
+ L2CAP_MOVE_ROLE_RESPONDER,
+};
+
+enum {
+ L2CAP_MOVE_STABLE,
+ L2CAP_MOVE_WAIT_REQ,
+ L2CAP_MOVE_WAIT_RSP,
+ L2CAP_MOVE_WAIT_RSP_SUCCESS,
+ L2CAP_MOVE_WAIT_CONFIRM,
+ L2CAP_MOVE_WAIT_CONFIRM_RSP,
+ L2CAP_MOVE_WAIT_LOGICAL_COMP,
+ L2CAP_MOVE_WAIT_LOGICAL_CFM,
+ L2CAP_MOVE_WAIT_LOCAL_BUSY,
+ L2CAP_MOVE_WAIT_PREPARE,
+};
+
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
+
+static inline void l2cap_chan_lock(struct l2cap_chan *chan)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- int sub;
+ mutex_lock(&chan->lock);
+}
- sub = (pi->next_tx_seq - pi->expected_ack_seq) % 64;
+static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
+{
+ mutex_unlock(&chan->lock);
+}
+
+static inline void l2cap_set_timer(struct l2cap_chan *chan,
+ struct delayed_work *work, long timeout)
+{
+ BT_DBG("chan %p state %s timeout %ld", chan,
+ state_to_string(chan->state), timeout);
- if (sub < 0)
- sub += 64;
+	/* If the delayed work was cancelled, do not hold(chan), since the
+	   previous set_timer already did it */
+ if (!cancel_delayed_work(work))
+ l2cap_chan_hold(chan);
+
+ schedule_delayed_work(work, timeout);
+}
- return sub == pi->remote_tx_win;
+static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
+ struct delayed_work *work)
+{
+ bool ret;
+
+	/* put(chan) if the delayed work was cancelled; otherwise the put
+	   is done in the delayed work function */
+ ret = cancel_delayed_work(work);
+ if (ret)
+ l2cap_chan_put(chan);
+
+ return ret;
+}
+
+#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
+#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
+#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
+#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
+#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \
+ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
+#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
+
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+ if (seq1 >= seq2)
+ return seq1 - seq2;
+ else
+ return chan->tx_win_max + 1 - seq2 + seq1;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+ return (seq + 1) % (chan->tx_win_max + 1);
}
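
With the default ERTM window (tx_win_max = L2CAP_DEFAULT_TX_WINDOW = 63) this arithmetic is modulo 64: __next_seq(chan, 63) wraps to 0, and __seq_offset(chan, 2, 62) is 63 + 1 - 62 + 2 = 4. When the extended window size option is negotiated, tx_win_max becomes L2CAP_DEFAULT_EXT_WINDOW (0x3FFF) and the same expressions apply modulo 0x4000.
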
-#define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1
-#define __get_reqseq(ctrl) ((ctrl) & L2CAP_CTRL_REQSEQ) >> 8
-#define __is_iframe(ctrl) !((ctrl) & L2CAP_CTRL_FRAME_TYPE)
-#define __is_sframe(ctrl) (ctrl) & L2CAP_CTRL_FRAME_TYPE
-#define __is_sar_start(ctrl) ((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START
+static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
+{
+ return NULL;
+}
+
+static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
+{
+}
+
+static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+{
+}
+
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
+ return 0;
+}
-void l2cap_load(void);
+extern bool disable_ertm;
+
+int l2cap_init_sockets(void);
+void l2cap_cleanup_sockets(void);
+bool l2cap_is_socket(struct socket *sock);
+
+void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan);
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
+
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
+
+struct l2cap_chan *l2cap_chan_create(void);
+void l2cap_chan_close(struct l2cap_chan *chan, int reason);
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority);
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan);
+void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+int l2cap_ertm_init(struct l2cap_chan *chan);
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_conn_update_id_addr(struct hci_conn *hcon);
+void l2cap_send_conn_req(struct l2cap_chan *chan);
+void l2cap_move_start(struct l2cap_chan *chan);
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+ u8 status);
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
+
+void l2cap_conn_get(struct l2cap_conn *conn);
+void l2cap_conn_put(struct l2cap_conn *conn);
+
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
+void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
#endif /* __L2CAP_H */
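
As a usage sketch for the l2cap_ops and no-op helpers above (hypothetical code, not part of this patch): a fixed-channel user fills in the callbacks it needs and reuses the l2cap_chan_no_* stubs for the rest. bt_skb_alloc() is assumed from bluetooth.h; whether .suspend may be left unset depends on the channel type, so it is simply omitted here.

/* Sketch only; assumes <net/bluetooth/bluetooth.h> and <net/bluetooth/l2cap.h>. */
static int example_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	/* Placeholder: a real user would consume the incoming PDU here. */
	kfree_skb(skb);
	return 0;
}

static void example_close(struct l2cap_chan *chan)
{
}

static void example_state_change(struct l2cap_chan *chan, int state, int err)
{
}

static struct sk_buff *example_alloc_skb(struct l2cap_chan *chan,
					 unsigned long len, int nb)
{
	return bt_skb_alloc(len, GFP_KERNEL);
}

static struct l2cap_ops example_chan_ops = {
	.name		= "Example fixed channel",
	.recv		= example_recv,
	.close		= example_close,
	.state_change	= example_state_change,
	.alloc_skb	= example_alloc_skb,
	.new_connection	= l2cap_chan_no_new_connection,
	.teardown	= l2cap_chan_no_teardown,
	.ready		= l2cap_chan_no_ready,
	.defer		= l2cap_chan_no_defer,
	.resume		= l2cap_chan_no_resume,
	.set_shutdown	= l2cap_chan_no_set_shutdown,
	.get_sndtimeo	= l2cap_chan_no_get_sndtimeo,
};
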
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
new file mode 100644
index 00000000000..bcffc9ae0c8
--- /dev/null
+++ b/include/net/bluetooth/mgmt.h
@@ -0,0 +1,580 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#define MGMT_INDEX_NONE 0xFFFF
+
+#define MGMT_STATUS_SUCCESS 0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND 0x01
+#define MGMT_STATUS_NOT_CONNECTED 0x02
+#define MGMT_STATUS_FAILED 0x03
+#define MGMT_STATUS_CONNECT_FAILED 0x04
+#define MGMT_STATUS_AUTH_FAILED 0x05
+#define MGMT_STATUS_NOT_PAIRED 0x06
+#define MGMT_STATUS_NO_RESOURCES 0x07
+#define MGMT_STATUS_TIMEOUT 0x08
+#define MGMT_STATUS_ALREADY_CONNECTED 0x09
+#define MGMT_STATUS_BUSY 0x0a
+#define MGMT_STATUS_REJECTED 0x0b
+#define MGMT_STATUS_NOT_SUPPORTED 0x0c
+#define MGMT_STATUS_INVALID_PARAMS 0x0d
+#define MGMT_STATUS_DISCONNECTED 0x0e
+#define MGMT_STATUS_NOT_POWERED 0x0f
+#define MGMT_STATUS_CANCELLED 0x10
+#define MGMT_STATUS_INVALID_INDEX 0x11
+#define MGMT_STATUS_RFKILLED 0x12
+
+struct mgmt_hdr {
+ __le16 opcode;
+ __le16 index;
+ __le16 len;
+} __packed;
+
+struct mgmt_addr_info {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+#define MGMT_ADDR_INFO_SIZE 7
+
+#define MGMT_OP_READ_VERSION 0x0001
+#define MGMT_READ_VERSION_SIZE 0
+struct mgmt_rp_read_version {
+ __u8 version;
+ __le16 revision;
+} __packed;
+
+#define MGMT_OP_READ_COMMANDS 0x0002
+#define MGMT_READ_COMMANDS_SIZE 0
+struct mgmt_rp_read_commands {
+ __le16 num_commands;
+ __le16 num_events;
+ __le16 opcodes[0];
+} __packed;
+
+#define MGMT_OP_READ_INDEX_LIST 0x0003
+#define MGMT_READ_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_index_list {
+ __le16 num_controllers;
+ __le16 index[0];
+} __packed;
+
+/* Reserve one extra byte for names in management messages so that they
+ * are always guaranteed to be nul-terminated */
+#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1)
+
+#define MGMT_SETTING_POWERED 0x00000001
+#define MGMT_SETTING_CONNECTABLE 0x00000002
+#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
+#define MGMT_SETTING_DISCOVERABLE 0x00000008
+#define MGMT_SETTING_PAIRABLE 0x00000010
+#define MGMT_SETTING_LINK_SECURITY 0x00000020
+#define MGMT_SETTING_SSP 0x00000040
+#define MGMT_SETTING_BREDR 0x00000080
+#define MGMT_SETTING_HS 0x00000100
+#define MGMT_SETTING_LE 0x00000200
+#define MGMT_SETTING_ADVERTISING 0x00000400
+#define MGMT_SETTING_SECURE_CONN 0x00000800
+#define MGMT_SETTING_DEBUG_KEYS 0x00001000
+#define MGMT_SETTING_PRIVACY 0x00002000
+
+#define MGMT_OP_READ_INFO 0x0004
+#define MGMT_READ_INFO_SIZE 0
+struct mgmt_rp_read_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __u8 dev_class[3];
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+
+struct mgmt_mode {
+ __u8 val;
+} __packed;
+
+#define MGMT_SETTING_SIZE 1
+
+#define MGMT_OP_SET_POWERED 0x0005
+
+#define MGMT_OP_SET_DISCOVERABLE 0x0006
+struct mgmt_cp_set_discoverable {
+ __u8 val;
+ __le16 timeout;
+} __packed;
+#define MGMT_SET_DISCOVERABLE_SIZE 3
+
+#define MGMT_OP_SET_CONNECTABLE 0x0007
+
+#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
+
+#define MGMT_OP_SET_PAIRABLE 0x0009
+
+#define MGMT_OP_SET_LINK_SECURITY 0x000A
+
+#define MGMT_OP_SET_SSP 0x000B
+
+#define MGMT_OP_SET_HS 0x000C
+
+#define MGMT_OP_SET_LE 0x000D
+#define MGMT_OP_SET_DEV_CLASS 0x000E
+struct mgmt_cp_set_dev_class {
+ __u8 major;
+ __u8 minor;
+} __packed;
+#define MGMT_SET_DEV_CLASS_SIZE 2
+
+#define MGMT_OP_SET_LOCAL_NAME 0x000F
+struct mgmt_cp_set_local_name {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+#define MGMT_SET_LOCAL_NAME_SIZE 260
+
+#define MGMT_OP_ADD_UUID 0x0010
+struct mgmt_cp_add_uuid {
+ __u8 uuid[16];
+ __u8 svc_hint;
+} __packed;
+#define MGMT_ADD_UUID_SIZE 17
+
+#define MGMT_OP_REMOVE_UUID 0x0011
+struct mgmt_cp_remove_uuid {
+ __u8 uuid[16];
+} __packed;
+#define MGMT_REMOVE_UUID_SIZE 16
+
+struct mgmt_link_key_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 val[16];
+ __u8 pin_len;
+} __packed;
+
+#define MGMT_OP_LOAD_LINK_KEYS 0x0012
+struct mgmt_cp_load_link_keys {
+ __u8 debug_keys;
+ __le16 key_count;
+ struct mgmt_link_key_info keys[0];
+} __packed;
+#define MGMT_LOAD_LINK_KEYS_SIZE 3
+
+#define MGMT_LTK_UNAUTHENTICATED 0x00
+#define MGMT_LTK_AUTHENTICATED 0x01
+
+struct mgmt_ltk_info {
+ struct mgmt_addr_info addr;
+ __u8 type;
+ __u8 master;
+ __u8 enc_size;
+ __le16 ediv;
+ __le64 rand;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013
+struct mgmt_cp_load_long_term_keys {
+ __le16 key_count;
+ struct mgmt_ltk_info keys[0];
+} __packed;
+#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2
+
+#define MGMT_OP_DISCONNECT 0x0014
+struct mgmt_cp_disconnect {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_DISCONNECT_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_disconnect {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS 0x0015
+#define MGMT_GET_CONNECTIONS_SIZE 0
+struct mgmt_rp_get_connections {
+ __le16 conn_count;
+ struct mgmt_addr_info addr[0];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_REPLY 0x0016
+struct mgmt_cp_pin_code_reply {
+ struct mgmt_addr_info addr;
+ __u8 pin_len;
+ __u8 pin_code[16];
+} __packed;
+#define MGMT_PIN_CODE_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 17)
+struct mgmt_rp_pin_code_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
+struct mgmt_cp_pin_code_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_PIN_CODE_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_SET_IO_CAPABILITY 0x0018
+struct mgmt_cp_set_io_capability {
+ __u8 io_capability;
+} __packed;
+#define MGMT_SET_IO_CAPABILITY_SIZE 1
+
+#define MGMT_OP_PAIR_DEVICE 0x0019
+struct mgmt_cp_pair_device {
+ struct mgmt_addr_info addr;
+ __u8 io_cap;
+} __packed;
+#define MGMT_PAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_pair_device {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_CANCEL_PAIR_DEVICE 0x001A
+#define MGMT_CANCEL_PAIR_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNPAIR_DEVICE 0x001B
+struct mgmt_cp_unpair_device {
+ struct mgmt_addr_info addr;
+ __u8 disconnect;
+} __packed;
+#define MGMT_UNPAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_unpair_device {
+ struct mgmt_addr_info addr;
+};
+
+#define MGMT_OP_USER_CONFIRM_REPLY 0x001C
+struct mgmt_cp_user_confirm_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_CONFIRM_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_user_confirm_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001D
+struct mgmt_cp_user_confirm_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_CONFIRM_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_USER_PASSKEY_REPLY 0x001E
+struct mgmt_cp_user_passkey_reply {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+} __packed;
+#define MGMT_USER_PASSKEY_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 4)
+struct mgmt_rp_user_passkey_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001F
+struct mgmt_cp_user_passkey_neg_reply {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_USER_PASSKEY_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
+#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
+struct mgmt_rp_read_local_oob_data {
+ __u8 hash[16];
+ __u8 randomizer[16];
+} __packed;
+struct mgmt_rp_read_local_oob_ext_data {
+ __u8 hash192[16];
+ __u8 randomizer192[16];
+ __u8 hash256[16];
+ __u8 randomizer256[16];
+} __packed;
+
+#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
+struct mgmt_cp_add_remote_oob_data {
+ struct mgmt_addr_info addr;
+ __u8 hash[16];
+ __u8 randomizer[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
+struct mgmt_cp_add_remote_oob_ext_data {
+ struct mgmt_addr_info addr;
+ __u8 hash192[16];
+ __u8 randomizer192[16];
+ __u8 hash256[16];
+ __u8 randomizer256[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64)
+
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022
+struct mgmt_cp_remove_remote_oob_data {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_REMOTE_OOB_DATA_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_START_DISCOVERY 0x0023
+struct mgmt_cp_start_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_START_DISCOVERY_SIZE 1
+
+#define MGMT_OP_STOP_DISCOVERY 0x0024
+struct mgmt_cp_stop_discovery {
+ __u8 type;
+} __packed;
+#define MGMT_STOP_DISCOVERY_SIZE 1
+
+#define MGMT_OP_CONFIRM_NAME 0x0025
+struct mgmt_cp_confirm_name {
+ struct mgmt_addr_info addr;
+ __u8 name_known;
+} __packed;
+#define MGMT_CONFIRM_NAME_SIZE (MGMT_ADDR_INFO_SIZE + 1)
+struct mgmt_rp_confirm_name {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_OP_BLOCK_DEVICE 0x0026
+struct mgmt_cp_block_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_BLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_UNBLOCK_DEVICE 0x0027
+struct mgmt_cp_unblock_device {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+
+#define MGMT_OP_SET_DEVICE_ID 0x0028
+struct mgmt_cp_set_device_id {
+ __le16 source;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
+} __packed;
+#define MGMT_SET_DEVICE_ID_SIZE 8
+
+#define MGMT_OP_SET_ADVERTISING 0x0029
+
+#define MGMT_OP_SET_BREDR 0x002A
+
+#define MGMT_OP_SET_STATIC_ADDRESS 0x002B
+struct mgmt_cp_set_static_address {
+ bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_STATIC_ADDRESS_SIZE 6
+
+#define MGMT_OP_SET_SCAN_PARAMS 0x002C
+struct mgmt_cp_set_scan_params {
+ __le16 interval;
+ __le16 window;
+} __packed;
+#define MGMT_SET_SCAN_PARAMS_SIZE 4
+
+#define MGMT_OP_SET_SECURE_CONN 0x002D
+
+#define MGMT_OP_SET_DEBUG_KEYS 0x002E
+
+#define MGMT_OP_SET_PRIVACY 0x002F
+struct mgmt_cp_set_privacy {
+ __u8 privacy;
+ __u8 irk[16];
+} __packed;
+#define MGMT_SET_PRIVACY_SIZE 17
+
+struct mgmt_irk_info {
+ struct mgmt_addr_info addr;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_IRKS 0x0030
+struct mgmt_cp_load_irks {
+ __le16 irk_count;
+ struct mgmt_irk_info irks[0];
+} __packed;
+#define MGMT_LOAD_IRKS_SIZE 2
+
+#define MGMT_OP_GET_CONN_INFO 0x0031
+struct mgmt_cp_get_conn_info {
+ struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_GET_CONN_INFO_SIZE MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_get_conn_info {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __s8 tx_power;
+ __s8 max_tx_power;
+} __packed;
+
+#define MGMT_EV_CMD_COMPLETE 0x0001
+struct mgmt_ev_cmd_complete {
+ __le16 opcode;
+ __u8 status;
+ __u8 data[0];
+} __packed;
+
+#define MGMT_EV_CMD_STATUS 0x0002
+struct mgmt_ev_cmd_status {
+ __le16 opcode;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_ERROR 0x0003
+struct mgmt_ev_controller_error {
+ __u8 error_code;
+} __packed;
+
+#define MGMT_EV_INDEX_ADDED 0x0004
+
+#define MGMT_EV_INDEX_REMOVED 0x0005
+
+#define MGMT_EV_NEW_SETTINGS 0x0006
+
+#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
+struct mgmt_ev_class_of_dev_changed {
+ __u8 dev_class[3];
+};
+
+#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
+struct mgmt_ev_local_name_changed {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
+
+#define MGMT_EV_NEW_LINK_KEY 0x0009
+struct mgmt_ev_new_link_key {
+ __u8 store_hint;
+ struct mgmt_link_key_info key;
+} __packed;
+
+#define MGMT_EV_NEW_LONG_TERM_KEY 0x000A
+struct mgmt_ev_new_long_term_key {
+ __u8 store_hint;
+ struct mgmt_ltk_info key;
+} __packed;
+
+#define MGMT_EV_DEVICE_CONNECTED 0x000B
+struct mgmt_ev_device_connected {
+ struct mgmt_addr_info addr;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_DEV_DISCONN_UNKNOWN 0x00
+#define MGMT_DEV_DISCONN_TIMEOUT 0x01
+#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
+#define MGMT_DEV_DISCONN_REMOTE 0x03
+
+#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
+struct mgmt_ev_device_disconnected {
+ struct mgmt_addr_info addr;
+ __u8 reason;
+} __packed;
+
+#define MGMT_EV_CONNECT_FAILED 0x000D
+struct mgmt_ev_connect_failed {
+ struct mgmt_addr_info addr;
+ __u8 status;
+} __packed;
+
+#define MGMT_EV_PIN_CODE_REQUEST 0x000E
+struct mgmt_ev_pin_code_request {
+ struct mgmt_addr_info addr;
+ __u8 secure;
+} __packed;
+
+#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
+struct mgmt_ev_user_confirm_request {
+ struct mgmt_addr_info addr;
+ __u8 confirm_hint;
+ __le32 value;
+} __packed;
+
+#define MGMT_EV_USER_PASSKEY_REQUEST 0x0010
+struct mgmt_ev_user_passkey_request {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_AUTH_FAILED 0x0011
+struct mgmt_ev_auth_failed {
+ struct mgmt_addr_info addr;
+ __u8 status;
+} __packed;
+
+#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
+#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
+
+#define MGMT_EV_DEVICE_FOUND 0x0012
+struct mgmt_ev_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_EV_DISCOVERING 0x0013
+struct mgmt_ev_discovering {
+ __u8 type;
+ __u8 discovering;
+} __packed;
+
+#define MGMT_EV_DEVICE_BLOCKED 0x0014
+struct mgmt_ev_device_blocked {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
+struct mgmt_ev_device_unblocked {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_DEVICE_UNPAIRED 0x0016
+struct mgmt_ev_device_unpaired {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_PASSKEY_NOTIFY 0x0017
+struct mgmt_ev_passkey_notify {
+ struct mgmt_addr_info addr;
+ __le32 passkey;
+ __u8 entered;
+} __packed;
+
+#define MGMT_EV_NEW_IRK 0x0018
+struct mgmt_ev_new_irk {
+ __u8 store_hint;
+ bdaddr_t rpa;
+ struct mgmt_irk_info irk;
+} __packed;
+
+struct mgmt_csrk_info {
+ struct mgmt_addr_info addr;
+ __u8 master;
+ __u8 val[16];
+} __packed;
+
+#define MGMT_EV_NEW_CSRK 0x0019
+struct mgmt_ev_new_csrk {
+ __u8 store_hint;
+ struct mgmt_csrk_info key;
+} __packed;
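
As a rough user-space sketch of how a command built from these definitions goes on the wire (the management socket setup on the HCI control channel is assumed and not shown): the 6-byte mgmt_hdr is followed by the command parameters, everything little-endian.

#include <stdint.h>
#include <unistd.h>

/* Set_Discoverable on controller `index`; mgmt_fd is an already-open
 * management socket (assumption, setup omitted). */
static int set_discoverable(int mgmt_fd, uint16_t index, uint8_t on, uint16_t timeout)
{
	uint8_t buf[6 + 3];			/* mgmt_hdr + parameters */

	buf[0] = 0x06; buf[1] = 0x00;		/* MGMT_OP_SET_DISCOVERABLE   */
	buf[2] = index & 0xff;
	buf[3] = index >> 8;
	buf[4] = 0x03; buf[5] = 0x00;		/* MGMT_SET_DISCOVERABLE_SIZE */
	buf[6] = on;				/* mgmt_cp_set_discoverable.val     */
	buf[7] = timeout & 0xff;		/* mgmt_cp_set_discoverable.timeout */
	buf[8] = timeout >> 8;

	return write(mgmt_fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf) ? 0 : -1;
}
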
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 71047bc0af8..578b83127af 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -1,5 +1,5 @@
-/*
- RFCOMM implementation for Linux Bluetooth stack (BlueZ).
+/*
+ RFCOMM implementation for Linux Bluetooth stack (BlueZ)
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
@@ -11,13 +11,13 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
@@ -105,7 +105,7 @@
struct rfcomm_hdr {
u8 addr;
u8 ctrl;
- u8 len; // Actual size can be 2 bytes
+ u8 len; /* Actual size can be 2 bytes */
} __packed;
struct rfcomm_cmd {
@@ -158,7 +158,6 @@ struct rfcomm_session {
struct timer_list timer;
unsigned long state;
unsigned long flags;
- atomic_t refcnt;
int initiator;
/* Default DLC parameters */
@@ -174,7 +173,7 @@ struct rfcomm_dlc {
struct sk_buff_head tx_queue;
struct timer_list timer;
- spinlock_t lock;
+ struct mutex lock;
unsigned long state;
unsigned long flags;
atomic_t refcnt;
@@ -211,6 +210,7 @@ struct rfcomm_dlc {
#define RFCOMM_AUTH_ACCEPT 6
#define RFCOMM_AUTH_REJECT 7
#define RFCOMM_DEFER_SETUP 8
+#define RFCOMM_ENC_DROP 9
/* Scheduling flags and events */
#define RFCOMM_SCHED_WAKEUP 31
@@ -228,21 +228,24 @@ struct rfcomm_dlc {
/* ---- RFCOMM SEND RPN ---- */
int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
u8 bit_rate, u8 data_bits, u8 stop_bits,
- u8 parity, u8 flow_ctrl_settings,
+ u8 parity, u8 flow_ctrl_settings,
u8 xon_char, u8 xoff_char, u16 param_mask);
/* ---- RFCOMM DLCs (channels) ---- */
struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
void rfcomm_dlc_free(struct rfcomm_dlc *d);
-int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
+int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
+ u8 channel);
int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
+void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb);
int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
void rfcomm_dlc_accept(struct rfcomm_dlc *d);
+struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel);
-#define rfcomm_dlc_lock(d) spin_lock(&d->lock)
-#define rfcomm_dlc_unlock(d) spin_unlock(&d->lock)
+#define rfcomm_dlc_lock(d) mutex_lock(&d->lock)
+#define rfcomm_dlc_unlock(d) mutex_unlock(&d->lock)
static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d)
{
@@ -255,8 +258,8 @@ static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
rfcomm_dlc_free(d);
}
-extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
-extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
@@ -271,12 +274,8 @@ static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
}
/* ---- RFCOMM sessions ---- */
-void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst);
-
-static inline void rfcomm_session_hold(struct rfcomm_session *s)
-{
- atomic_inc(&s->refcnt);
-}
+void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src,
+ bdaddr_t *dst);
/* ---- RFCOMM sockets ---- */
struct sockaddr_rc {
@@ -298,11 +297,14 @@ struct rfcomm_conninfo {
#define RFCOMM_LM_TRUSTED 0x0008
#define RFCOMM_LM_RELIABLE 0x0010
#define RFCOMM_LM_SECURE 0x0020
+#define RFCOMM_LM_FIPS 0x0040
#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk)
struct rfcomm_pinfo {
struct bt_sock bt;
+ bdaddr_t src;
+ bdaddr_t dst;
struct rfcomm_dlc *dlc;
u8 channel;
u8 sec_level;
@@ -312,7 +314,8 @@ struct rfcomm_pinfo {
int rfcomm_init_sockets(void);
void rfcomm_cleanup_sockets(void);
-int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d);
+int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel,
+ struct rfcomm_dlc **d);
/* ---- RFCOMM TTY ---- */
#define RFCOMM_MAX_DEV 256
@@ -323,11 +326,16 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc
#define RFCOMMGETDEVINFO _IOR('R', 211, int)
#define RFCOMMSTEALDLC _IOW('R', 220, int)
+/* rfcomm_dev.flags bit definitions */
#define RFCOMM_REUSE_DLC 0
#define RFCOMM_RELEASE_ONHUP 1
#define RFCOMM_HANGUP_NOW 2
#define RFCOMM_TTY_ATTACHED 3
-#define RFCOMM_TTY_RELEASED 4
+#define RFCOMM_DEFUNCT_BIT4 4 /* don't reuse this bit - userspace visible */
+
+/* rfcomm_dev.status bit definitions */
+#define RFCOMM_DEV_RELEASED 0
+#define RFCOMM_TTY_OWNED 1
struct rfcomm_dev_req {
s16 dev_id;
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index e28a2a77147..2019d1a0996 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -1,4 +1,4 @@
-/*
+/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
@@ -12,13 +12,13 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
@@ -55,11 +55,8 @@ struct sco_conninfo {
struct sco_conn {
struct hci_conn *hcon;
- bdaddr_t *dst;
- bdaddr_t *src;
-
spinlock_t lock;
- struct sock *sk;
+ struct sock *sk;
unsigned int mtu;
};
@@ -72,7 +69,10 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
+ bdaddr_t src;
+ bdaddr_t dst;
__u32 flags;
+ __u16 setting;
struct sco_conn *conn;
};
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
new file mode 100644
index 00000000000..1d67fb6b23a
--- /dev/null
+++ b/include/net/busy_poll.h
@@ -0,0 +1,169 @@
+/*
+ * net busy poll support
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Author: Eliezer Tamir
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ */
+
+#ifndef _LINUX_NET_BUSY_POLL_H
+#define _LINUX_NET_BUSY_POLL_H
+
+#include <linux/netdevice.h>
+#include <net/ip.h>
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
+/* return values from ndo_ll_poll */
+#define LL_FLUSH_FAILED -1
+#define LL_FLUSH_BUSY -2
+
+static inline bool net_busy_loop_on(void)
+{
+ return sysctl_net_busy_poll;
+}
+
+static inline u64 busy_loop_us_clock(void)
+{
+ return local_clock() >> 10;
+}
+
+static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
+{
+ return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
+}
+
+/* in poll/select we use the global sysctl_net_busy_poll value */
+static inline unsigned long busy_loop_end_time(void)
+{
+ return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+ return sk->sk_ll_usec && sk->sk_napi_id &&
+ !need_resched() && !signal_pending(current);
+}
+
+
+static inline bool busy_loop_timeout(unsigned long end_time)
+{
+ unsigned long now = busy_loop_us_clock();
+
+ return time_after(now, end_time);
+}
+
+/* when used in sock_poll(), nonblock is known at compile time to be true,
+ * so the loop and end_time will be optimized out
+ */
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+ unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+ const struct net_device_ops *ops;
+ struct napi_struct *napi;
+ int rc = false;
+
+ /*
+ * rcu read lock for napi hash
+ * bh so we don't race with net_rx_action
+ */
+ rcu_read_lock_bh();
+
+ napi = napi_by_id(sk->sk_napi_id);
+ if (!napi)
+ goto out;
+
+ ops = napi->dev->netdev_ops;
+ if (!ops->ndo_busy_poll)
+ goto out;
+
+ do {
+ rc = ops->ndo_busy_poll(napi);
+
+ if (rc == LL_FLUSH_FAILED)
+ break; /* permanent failure */
+
+ if (rc > 0)
+ /* local bh are disabled so it is ok to use _BH */
+ NET_ADD_STATS_BH(sock_net(sk),
+ LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+ cpu_relax();
+
+ } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
+ !need_resched() && !busy_loop_timeout(end_time));
+
+ rc = !skb_queue_empty(&sk->sk_receive_queue);
+out:
+ rcu_read_unlock_bh();
+ return rc;
+}
+
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+ skb->napi_id = napi->napi_id;
+}
+
+/* used in the protocol handler to propagate the napi_id to the socket */
+static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
+{
+ sk->sk_napi_id = skb->napi_id;
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline unsigned long net_busy_loop_on(void)
+{
+ return 0;
+}
+
+static inline unsigned long busy_loop_end_time(void)
+{
+ return 0;
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+ return false;
+}
+
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+}
+
+static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static inline bool busy_loop_timeout(unsigned long end_time)
+{
+ return true;
+}
+
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+ return false;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+#endif /* _LINUX_NET_BUSY_POLL_H */
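/*
 * Editorial sketch (not part of the patch): how a protocol receive path
 * might use the helpers above before falling back to a blocking wait.
 * my_proto_recvmsg_sketch() is hypothetical; only sk_can_busy_loop(),
 * sk_busy_loop() and the sk_receive_queue check come from this header.
 */
static int my_proto_recvmsg_sketch(struct sock *sk, int nonblock)
{
	/* Spin on the NAPI queue first, if busy polling is allowed. */
	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
		sk_busy_loop(sk, nonblock);

	/* ... continue with the normal (possibly blocking) receive path ... */
	return 0;
}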
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 8eff83b9536..028b754ae9b 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -9,8 +9,10 @@
#include <net/caif/caif_layer.h>
#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_device.h>
#include <linux/caif/caif_socket.h>
#include <linux/if.h>
+#include <linux/net.h>
/**
* struct caif_param - CAIF parameters.
@@ -62,46 +64,65 @@ struct caif_connect_request {
* E.g. CAIF Socket will call this function for each socket it connects
* and have one client_layer instance for each socket.
*/
-int caif_connect_client(struct caif_connect_request *conn_req,
+int caif_connect_client(struct net *net,
+ struct caif_connect_request *conn_req,
struct cflayer *client_layer, int *ifindex,
int *headroom, int *tailroom);
/**
* caif_disconnect_client - Disconnects a client from the CAIF stack.
*
- * @client_layer: Client layer to be removed.
+ * @client_layer: Client layer to be disconnected.
*/
-int caif_disconnect_client(struct cflayer *client_layer);
+int caif_disconnect_client(struct net *net, struct cflayer *client_layer);
+
/**
- * caif_release_client - Release adaptation layer reference to client.
+ * caif_client_register_refcnt - register ref-count functions provided by client.
*
- * @client_layer: Client layer.
+ * @adapt_layer: Client layer using CAIF Stack.
+ * @hold: Function provided by client layer increasing ref-count
+ * @put: Function provided by client layer decreasing ref-count
*
- * Releases a client/adaptation layer use of the caif stack.
- * This function must be used after caif_disconnect_client to
- * decrease the reference count of the service layer.
+ * Client of the CAIF Stack must register functions for reference counting.
+ * These functions are called by the CAIF Stack for every upstream packet,
+ * and must therefore be implemented efficiently.
+ *
+ * The client should call caif_free_client() when the reference count decreases to zero.
*/
-void caif_release_client(struct cflayer *client_layer);
+void caif_client_register_refcnt(struct cflayer *adapt_layer,
+ void (*hold)(struct cflayer *lyr),
+ void (*put)(struct cflayer *lyr));
/**
- * connect_req_to_link_param - Translate configuration parameters
- * from socket format to internal format.
- * @cnfg: Pointer to configuration handler
- * @con_req: Configuration parameters supplied in function
- * caif_connect_client
- * @channel_setup_param: Parameters supplied to the CAIF Core stack for
- * setting up channels.
+ * caif_free_client - Free memory used to manage the client in the CAIF Stack.
*
+ * @client_layer: Client layer to be removed.
+ *
+ * This function must be called from client layer in order to free memory.
+ * Caller must guarantee that no packets are in flight upstream when calling
+ * this function.
*/
-int connect_req_to_link_param(struct cfcnfg *cnfg,
- struct caif_connect_request *con_req,
- struct cfctrl_link_param *channel_setup_param);
+void caif_free_client(struct cflayer *adap_layer);
/**
- * get_caif_conf() - Get the configuration handler.
+ * caif_enroll_dev() - Enroll a net-device as a CAIF Link layer
+ * @dev: Network device to enroll.
+ * @caifdev: Configuration information from CAIF Link Layer
+ * @link_support: Link layer support layer
+ * @head_room: Head room needed by link support layer
+ * @layer: Lowest layer in CAIF stack
+ * @rcv_fun: Receive function for CAIF stack.
+ *
+ * This function enrolls a CAIF link layer into the CAIF Stack and
+ * expects the interface to be able to handle CAIF payload.
+ * The link_support layer is used to add any Link Layer specific
+ * framing.
*/
-struct cfcnfg *get_caif_conf(void);
-
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ struct cflayer *link_support, int head_room,
+ struct cflayer **layer, int (**rcv_func)(
+ struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *));
#endif /* CAIF_DEV_H_ */
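/*
 * Editorial sketch (not part of the patch): a CAIF client registering the
 * ref-count callbacks described above. The my_* names are hypothetical;
 * only caif_client_register_refcnt() and caif_free_client() come from
 * caif_dev.h.
 */
static void my_hold(struct cflayer *lyr)
{
	/* e.g. take a reference on the owning socket */
}

static void my_put(struct cflayer *lyr)
{
	/* e.g. drop the reference taken in my_hold() */
}

static void my_client_setup_sketch(struct cflayer *my_client_layer)
{
	caif_client_register_refcnt(my_client_layer, my_hold, my_put);
}

static void my_client_teardown_sketch(struct cflayer *my_client_layer)
{
	/* only once no packets are in flight and the ref-count reached zero */
	caif_free_client(my_client_layer);
}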
diff --git a/include/net/caif/caif_device.h b/include/net/caif/caif_device.h
index d02f044adb8..d6e3c4267c8 100644
--- a/include/net/caif/caif_device.h
+++ b/include/net/caif/caif_device.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
new file mode 100644
index 00000000000..097f69cfaa7
--- /dev/null
+++ b/include/net/caif/caif_hsi.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Daniel Martensson / daniel.martensson@stericsson.com
+ * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_HSI_H_
+#define CAIF_HSI_H_
+
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_device.h>
+#include <linux/atomic.h>
+
+/*
+ * Maximum number of CAIF frames that can reside in the same HSI frame.
+ */
+#define CFHSI_MAX_PKTS 15
+
+/*
+ * Maximum number of bytes used for the frame that can be embedded in the
+ * HSI descriptor.
+ */
+#define CFHSI_MAX_EMB_FRM_SZ 96
+
+/*
+ * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
+ * debugging. Both TX and RX buffers will be filled before the transfer.
+ */
+#define CFHSI_DBG_PREFILL 0
+
+/* Structure describing a HSI packet descriptor. */
+#pragma pack(1) /* Byte alignment. */
+struct cfhsi_desc {
+ u8 header;
+ u8 offset;
+ u16 cffrm_len[CFHSI_MAX_PKTS];
+ u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
+};
+#pragma pack() /* Default alignment. */
+
+/* Size of the complete HSI packet descriptor. */
+#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
+
+/*
+ * Size of the complete HSI packet descriptor excluding the optional embedded
+ * CAIF frame.
+ */
+#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
+
+/*
+ * Maximum bytes transferred in one transfer.
+ */
+#define CFHSI_MAX_CAIF_FRAME_SZ 4096
+
+#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
+
+/* Size of the complete HSI TX buffer. */
+#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
+
+/* Size of the complete HSI RX buffer. */
+#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
+
+/* Bitmasks for the HSI descriptor. */
+#define CFHSI_PIGGY_DESC (0x01 << 7)
+
+#define CFHSI_TX_STATE_IDLE 0
+#define CFHSI_TX_STATE_XFER 1
+
+#define CFHSI_RX_STATE_DESC 0
+#define CFHSI_RX_STATE_PAYLOAD 1
+
+/* Bitmasks for power management. */
+#define CFHSI_WAKE_UP 0
+#define CFHSI_WAKE_UP_ACK 1
+#define CFHSI_WAKE_DOWN_ACK 2
+#define CFHSI_AWAKE 3
+#define CFHSI_WAKELOCK_HELD 4
+#define CFHSI_SHUTDOWN 5
+#define CFHSI_FLUSH_FIFO 6
+
+#ifndef CFHSI_INACTIVITY_TOUT
+#define CFHSI_INACTIVITY_TOUT (1 * HZ)
+#endif /* CFHSI_INACTIVITY_TOUT */
+
+#ifndef CFHSI_WAKE_TOUT
+#define CFHSI_WAKE_TOUT (3 * HZ)
+#endif /* CFHSI_WAKE_TOUT */
+
+#ifndef CFHSI_MAX_RX_RETRIES
+#define CFHSI_MAX_RX_RETRIES (10 * HZ)
+#endif
+
+/* Structure implemented by the CAIF HSI driver. */
+struct cfhsi_cb_ops {
+ void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
+ void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
+ void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
+ void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
+};
+
+/* Structure implemented by HSI device. */
+struct cfhsi_ops {
+ int (*cfhsi_up) (struct cfhsi_ops *dev);
+ int (*cfhsi_down) (struct cfhsi_ops *dev);
+ int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
+ int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
+ int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
+ int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
+ int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
+ int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
+ int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
+ struct cfhsi_cb_ops *cb_ops;
+};
+
+/* Structure holding the processing state of received CAIF frames */
+struct cfhsi_rx_state {
+ int state;
+ int nfrms;
+ int pld_len;
+ int retries;
+ bool piggy_desc;
+};
+
+/* Priority mapping */
+enum {
+ CFHSI_PRIO_CTL = 0,
+ CFHSI_PRIO_VI,
+ CFHSI_PRIO_VO,
+ CFHSI_PRIO_BEBK,
+ CFHSI_PRIO_LAST,
+};
+
+struct cfhsi_config {
+ u32 inactivity_timeout;
+ u32 aggregation_timeout;
+ u32 head_align;
+ u32 tail_align;
+ u32 q_high_mark;
+ u32 q_low_mark;
+};
+
+/* Structure implemented by CAIF HSI drivers. */
+struct cfhsi {
+ struct caif_dev_common cfdev;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct sk_buff_head qhead[CFHSI_PRIO_LAST];
+ struct cfhsi_cb_ops cb_ops;
+ struct cfhsi_ops *ops;
+ int tx_state;
+ struct cfhsi_rx_state rx_state;
+ struct cfhsi_config cfg;
+ int rx_len;
+ u8 *rx_ptr;
+ u8 *tx_buf;
+ u8 *rx_buf;
+ u8 *rx_flip_buf;
+ spinlock_t lock;
+ int flow_off_sent;
+ struct list_head list;
+ struct work_struct wake_up_work;
+ struct work_struct wake_down_work;
+ struct work_struct out_of_sync_work;
+ struct workqueue_struct *wq;
+ wait_queue_head_t wake_up_wait;
+ wait_queue_head_t wake_down_wait;
+ wait_queue_head_t flush_fifo_wait;
+ struct timer_list inactivity_timer;
+ struct timer_list rx_slowpath_timer;
+
+ /* TX aggregation */
+ int aggregation_len;
+ struct timer_list aggregation_timer;
+
+ unsigned long bits;
+};
+extern struct platform_driver cfhsi_driver;
+
+/**
+ * enum ifla_caif_hsi - CAIF HSI RT Netlink parameters.
+ * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
+ * taking the HSI wakeline down, in milliseconds.
+ * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
+ * enum ifla_caif_hsi is used to specify the configuration attributes.
+ */
+enum ifla_caif_hsi {
+ __IFLA_CAIF_HSI_UNSPEC,
+ __IFLA_CAIF_HSI_INACTIVITY_TOUT,
+ __IFLA_CAIF_HSI_AGGREGATION_TOUT,
+ __IFLA_CAIF_HSI_HEAD_ALIGN,
+ __IFLA_CAIF_HSI_TAIL_ALIGN,
+ __IFLA_CAIF_HSI_QHIGH_WATERMARK,
+ __IFLA_CAIF_HSI_QLOW_WATERMARK,
+ __IFLA_CAIF_HSI_MAX
+};
+
+struct cfhsi_ops *cfhsi_get_ops(void);
+
+#endif /* CAIF_HSI_H_ */
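/*
 * Editorial sketch (not part of the patch): a low-level HSI device driver
 * filling in struct cfhsi_ops and signalling TX completion through cb_ops.
 * All my_* names are hypothetical.
 */
static int my_hsi_tx(u8 *ptr, int len, struct cfhsi_ops *dev)
{
	/* start the HSI transfer of 'len' bytes at 'ptr' ... */
	return 0;
}

static void my_hsi_tx_complete(struct cfhsi_ops *dev)
{
	/* called from the device's transfer-complete handler */
	dev->cb_ops->tx_done_cb(dev->cb_ops);
}

static struct cfhsi_ops my_hsi_ops = {
	.cfhsi_tx = my_hsi_tx,
	/* .cfhsi_up, .cfhsi_down, .cfhsi_rx, ... filled in the same way */
};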
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index c8b07a904e7..94e5ed64dc6 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -15,7 +15,6 @@ struct cfpktq;
struct caif_payload_info;
struct caif_packet_funcs;
-
#define CAIF_LAYER_NAME_SZ 16
/**
@@ -33,7 +32,6 @@ do { \
} \
} while (0)
-
/**
* enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
*
@@ -123,9 +121,7 @@ enum caif_direction {
 * @transmit: Packet transmit function.
* @ctrlcmd: Used for control signalling upwards in the stack.
* @modemcmd: Used for control signaling downwards in the stack.
- * @prio: Priority of this layer.
* @id: The identity of this layer
- * @type: The type of this layer
* @name: Name of the layer.
*
* This structure defines the layered structure in CAIF.
@@ -141,7 +137,7 @@ enum caif_direction {
* - All layers must use this structure. If embedding it, then place this
* structure first in the layer specific structure.
*
- * - Each layer should not depend on any others layer private data.
+ * - Each layer should not depend on any other layer's private data.
*
* - In order to send data upwards do
* layer->up->receive(layer->up, packet);
@@ -155,16 +151,23 @@ struct cflayer {
struct list_head node;
/*
- * receive() - Receive Function.
+ * receive() - Receive Function (non-blocking).
* Contract: Each layer must implement a receive function passing the
* CAIF packets upwards in the stack.
* Packet handling rules:
- * - The CAIF packet (cfpkt) cannot be accessed after
- * passing it to the next layer using up->receive().
+ * - The CAIF packet (cfpkt) ownership is passed to the
+ * called receive function. This means that the
+ * packet cannot be accessed after passing it to the
+ * above layer using up->receive().
+ *
* - If parsing of the packet fails, the packet must be
- * destroyed and -1 returned from the function.
+ * destroyed and negative error code returned
+ * from the function.
+ * EXCEPTION: If the framing layer (cffrml) returns
+ * -EILSEQ, the packet is not freed.
+ *
* - If parsing succeeds (and above layers return OK) then
- * the function must return a value > 0.
+ * the function must return a value >= 0.
*
* Returns result < 0 indicates an error, 0 or positive value
* indicates success.
@@ -176,7 +179,7 @@ struct cflayer {
int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
/*
- * transmit() - Transmit Function.
+ * transmit() - Transmit Function (non-blocking).
* Contract: Each layer must implement a transmit function passing the
* CAIF packet downwards in the stack.
* Packet handling rules:
@@ -185,15 +188,16 @@ struct cflayer {
* cannot be accessed after passing it to the below
* layer using dn->transmit().
*
- * - If transmit fails, however, the ownership is returned
- * to thecaller. The caller of "dn->transmit()" must
- * destroy or resend packet.
+ * - Upon error the packet ownership is still passed on,
+ * so the packet shall be freed where error is detected.
+ * Callers of the transmit function shall not free packets,
+ * but errors shall be returned.
*
* - Return value less than zero means error, zero or
* greater than zero means OK.
*
- * result < 0 indicates an error, 0 or positive value
- * indicate success.
+ * Returns result < 0 indicates an error, 0 or positive value
+ * indicates success.
*
* @layr: Pointer to the current layer the receive function
 * is implemented for (this pointer).
@@ -202,7 +206,7 @@ struct cflayer {
int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
/*
- * cttrlcmd() - Control Function upwards in CAIF Stack.
+ * ctrlcmd() - Control Function upwards in CAIF Stack (non-blocking).
* Used for signaling responses (CAIF_CTRLCMD_*_RSP)
* and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
*
@@ -224,9 +228,7 @@ struct cflayer {
*/
int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
- unsigned short prio;
unsigned int id;
- unsigned int type;
char name[CAIF_LAYER_NAME_SZ];
};
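/*
 * Editorial sketch (not part of the patch): a receive() implementation that
 * follows the ownership rules above. my_parse_header() is hypothetical;
 * cfpkt_destroy() and up->receive() are the documented contract.
 */
static int my_parse_header(struct cfpkt *pkt);	/* hypothetical parser */

static int my_layer_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	if (my_parse_header(pkt) < 0) {
		cfpkt_destroy(pkt);	/* parse failed: this layer frees it */
		return -EPROTO;
	}
	/* success: ownership moves upwards, pkt must not be touched after */
	return layr->up->receive(layr->up, pkt);
}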
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
deleted file mode 100644
index 5bcce55438c..00000000000
--- a/include/net/caif/caif_shm.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#ifndef CAIF_SHM_H_
-#define CAIF_SHM_H_
-
-struct shmdev_layer {
- u32 shm_base_addr;
- u32 shm_total_sz;
- u32 shm_id;
- u32 shm_loopback;
- void *hmbx;
- int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
- int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
- struct shmdev_layer *pshm_dev, void *pshm_drv);
- struct net_device *pshm_netdev;
-};
-
-extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
-extern void caif_shmcore_remove(struct net_device *pshm_netdev);
-
-#endif
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index 87c3d11b8e5..aa6a485b054 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -55,8 +55,8 @@
struct cfspi_xfer {
u16 tx_dma_len;
u16 rx_dma_len;
- void *va_tx;
- dma_addr_t pa_tx;
+ void *va_tx[2];
+ dma_addr_t pa_tx[2];
void *va_rx;
dma_addr_t pa_rx;
};
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index f688478bfb8..70bfd017581 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -14,18 +14,6 @@
struct cfcnfg;
/**
- * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack
- *
- * @CFPHYTYPE_FRAG: Fragmented frames physical interface.
- * @CFPHYTYPE_CAIF: Generic CAIF physical interface
- */
-enum cfcnfg_phy_type {
- CFPHYTYPE_FRAG = 1,
- CFPHYTYPE_CAIF,
- CFPHYTYPE_MAX
-};
-
-/**
* enum cfcnfg_phy_preference - Physical preference HW Abstraction
*
* @CFPHYPREF_UNSPECIFIED: Default physical interface
@@ -46,6 +34,12 @@ enum cfcnfg_phy_preference {
};
/**
+ * get_cfcnfg() - Get the CAIF configuration object given network.
+ * @net: Network for the CAIF configuration object.
+ */
+struct cfcnfg *get_cfcnfg(struct net *net);
+
+/**
* cfcnfg_create() - Create the CAIF configuration object.
*/
struct cfcnfg *cfcnfg_create(void);
@@ -60,23 +54,20 @@ void cfcnfg_remove(struct cfcnfg *cfg);
* cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
* @cnfg: Pointer to a CAIF configuration object, created by
* cfcnfg_create().
- * @phy_type: Specifies the type of physical interface, e.g.
- * CFPHYTYPE_FRAG.
* @dev: Pointer to link layer device
* @phy_layer: Specify the physical layer. The transmit function
* MUST be set in the structure.
- * @phyid: The assigned physical ID for this layer, used in
- * cfcnfg_add_adapt_layer to specify PHY for the link.
* @pref: The phy (link layer) preference.
+ * @link_support: Protocol implementation for link layer specific protocol.
* @fcs: Specify if checksum is used in CAIF Framing Layer.
- * @stx: Specify if Start Of Frame eXtention is used.
+ * @head_room: Head space needed by link specific protocol.
*/
-
void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
- u16 *phyid, enum cfcnfg_phy_preference pref,
- bool fcs, bool stx);
+ enum cfcnfg_phy_preference pref,
+ struct cflayer *link_support,
+ bool fcs, int head_room);
/**
 * cfcnfg_del_phy_layer - Deletes a phy layer from the CAIF stack.
@@ -88,61 +79,12 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer);
/**
- * cfcnfg_disconn_adapt_layer - Disconnects an adaptation layer.
- *
- * @cnfg: Pointer to a CAIF configuration object, created by
- * cfcnfg_create().
- * @adap_layer: Adaptation layer to be removed.
- */
-int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg,
- struct cflayer *adap_layer);
-
-/**
- * cfcnfg_release_adap_layer - Used by client to release the adaptation layer.
- *
- * @adap_layer: Adaptation layer.
- */
-void cfcnfg_release_adap_layer(struct cflayer *adap_layer);
-
-/**
- * cfcnfg_add_adaptation_layer - Add an adaptation layer to the CAIF stack.
- *
- * The adaptation Layer is where the interface to application or higher-level
- * driver functionality is implemented.
- *
- * @cnfg: Pointer to a CAIF configuration object, created by
- * cfcnfg_create().
- * @param: Link setup parameters.
- * @adap_layer: Specify the adaptation layer; the receive and
- * flow-control functions MUST be set in the structure.
- * @ifindex: Link layer interface index used for this connection.
- * @proto_head: Protocol head-space needed by CAIF protocol,
- * excluding link layer.
- * @proto_tail: Protocol tail-space needed by CAIF protocol,
- * excluding link layer.
- */
-int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
- struct cfctrl_link_param *param,
- struct cflayer *adap_layer,
- int *ifindex,
- int *proto_head,
- int *proto_tail);
-
-/**
- * cfcnfg_get_phyid() - Get physical ID, given type.
- * Returns one of the physical interfaces matching the given type.
- * Zero if no match is found.
+ * cfcnfg_set_phy_state() - Set the state of the physical interface device.
* @cnfg: Configuration object
- * @phy_pref: Caif Link Layer preference
+ * @phy_layer: Physical Layer representation
+ * @up: State of device
*/
-struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
- enum cfcnfg_phy_preference phy_pref);
+int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
+ bool up);
-/**
- * cfcnfg_get_id_from_ifi() - Get the Physical Identifier of ifindex,
- * it matches caif physical id with the kernel interface id.
- * @cnfg: Configuration object
- * @ifi: ifindex obtained from socket.c bindtodevice.
- */
-int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
#endif /* CFCNFG_H_ */
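/*
 * Editorial sketch (not part of the patch): registering a link layer with
 * the new cfcnfg_add_phy_layer() signature (no phy_type/phyid arguments;
 * a link_support layer and explicit head room instead). The my_* names are
 * hypothetical.
 */
static void my_register_phy_sketch(struct cfcnfg *cnfg, struct net_device *dev,
				   struct cflayer *phy,
				   struct cflayer *link_support)
{
	cfcnfg_add_phy_layer(cnfg, dev, phy, CFPHYPREF_UNSPECIFIED,
			     link_support, false /* no FCS */,
			     0 /* no extra head room */);
}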
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
index e54f6396fa4..f2ae33d23ba 100644
--- a/include/net/caif/cfctrl.h
+++ b/include/net/caif/cfctrl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -121,19 +121,10 @@ int cfctrl_linkup_request(struct cflayer *cfctrl,
struct cflayer *user_layer);
int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid,
struct cflayer *client);
-void cfctrl_sleep_req(struct cflayer *cfctrl);
-void cfctrl_wake_req(struct cflayer *cfctrl);
-void cfctrl_getstartreason_req(struct cflayer *cfctrl);
+
struct cflayer *cfctrl_create(void);
-void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn);
-void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up);
struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer);
-bool cfctrl_req_eq(struct cfctrl_request_info *r1,
- struct cfctrl_request_info *r2);
-void cfctrl_insert_req(struct cfctrl *ctrl,
- struct cfctrl_request_info *req);
-struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
- struct cfctrl_request_info *req);
-void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer);
+int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer);
+void cfctrl_remove(struct cflayer *layr);
#endif /* CFCTRL_H_ */
diff --git a/include/net/caif/cffrml.h b/include/net/caif/cffrml.h
index 3f14d2e1ce6..a06e33fbaa8 100644
--- a/include/net/caif/cffrml.h
+++ b/include/net/caif/cffrml.h
@@ -1,16 +1,21 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
#ifndef CFFRML_H_
#define CFFRML_H_
#include <net/caif/caif_layer.h>
+#include <linux/netdevice.h>
struct cffrml;
-struct cflayer *cffrml_create(u16 phyid, bool DoFCS);
+struct cflayer *cffrml_create(u16 phyid, bool use_fcs);
+void cffrml_free(struct cflayer *layr);
void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
+void cffrml_put(struct cflayer *layr);
+void cffrml_hold(struct cflayer *layr);
+int cffrml_refcnt_read(struct cflayer *layr);
#endif /* CFFRML_H_ */
diff --git a/include/net/caif/cfmuxl.h b/include/net/caif/cfmuxl.h
index 4e1b4f33423..752999572f2 100644
--- a/include/net/caif/cfmuxl.h
+++ b/include/net/caif/cfmuxl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -16,7 +16,5 @@ int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid);
struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid);
int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *up, u8 phyid);
struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 linkid);
-bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid);
-u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id);
#endif /* CFMUXL_H_ */
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fbc681beff5..1c1ad46250d 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -16,12 +16,6 @@ struct cfpkt;
*/
struct cfpkt *cfpkt_create(u16 len);
-/* Create a CAIF packet.
- * data Data to copy.
- * len Length of packet to be created
- * @return New packet.
- */
-struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len);
/*
* Destroy a CAIF Packet.
 * pkt Packet to be destroyed.
@@ -181,22 +175,6 @@ u16 cfpkt_iterate(struct cfpkt *pkt,
u16 (*iter_func)(u16 chks, void *buf, u16 len),
u16 data);
-/* Append by giving user access to packet buffer
- * cfpkt Packet to append to
- * buf Buffer inside pkt that user shall copy data into
- * buflen Length of buffer and number of bytes added to packet
- * @return 0 on error, 1 on success
- */
-int cfpkt_raw_append(struct cfpkt *cfpkt, void **buf, unsigned int buflen);
-
-/* Extract by giving user access to packet buffer
- * cfpkt Packet to extract from
- * buf Buffer inside pkt that user shall copy data from
- * buflen Length of buffer and number of bytes removed from packet
- * @return 0 on error, 1 on success
- */
-int cfpkt_raw_extract(struct cfpkt *cfpkt, void **buf, unsigned int buflen);
-
/* Map from a "native" packet (e.g. Linux Socket Buffer) to a CAIF packet.
* dir - Direction indicating whether this packet is to be sent or received.
* nativepkt - The native packet to be transformed to a CAIF packet
@@ -211,64 +189,17 @@ struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt);
void *cfpkt_tonative(struct cfpkt *pkt);
/*
- * Insert a packet in the packet queue.
- * pktq Packet queue to insert into
- * pkt Packet to be inserted in queue
- * prio Priority of packet
- */
-void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt,
- unsigned short prio);
-
-/*
- * Remove a packet from the packet queue.
- * pktq Packet queue to fetch packets from.
- * @return Dequeued packet.
- */
-struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq);
-
-/*
- * Peek into a packet from the packet queue.
- * pktq Packet queue to fetch packets from.
- * @return Peeked packet.
- */
-struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq);
-
-/*
- * Initiates the packet queue.
- * @return Pointer to new packet queue.
- */
-struct cfpktq *cfpktq_create(void);
-
-/*
- * Get the number of packets in the queue.
- * pktq Packet queue to fetch count from.
- * @return Number of packets in queue.
- */
-int cfpkt_qcount(struct cfpktq *pktq);
-
-/*
- * Put content of packet into buffer for debuging purposes.
- * pkt Packet to copy data from
- * buf Buffer to copy data into
- * buflen Length of data to copy
- * @return Pointer to copied data
- */
-char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen);
-
-/*
- * Clones a packet and releases the original packet.
- * This is used for taking ownership of a packet e.g queueing.
- * pkt Packet to clone and release.
- * @return Cloned packet.
- */
-struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt);
-
-
-/*
* Returns packet information for a packet.
* pkt Packet to get info from;
* @return Packet information
*/
struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
-/*! @} */
+
+/**
+ * cfpkt_set_prio - set priority for a CAIF packet.
+ *
+ * @pkt: The CAIF packet to be adjusted.
+ * @prio: one of TC_PRIO_ constants.
+ */
+void cfpkt_set_prio(struct cfpkt *pkt, int prio);
+
#endif /* CFPKT_H_ */
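/*
 * Editorial sketch (not part of the patch): wrapping an sk_buff as a CAIF
 * packet and tagging it with the new cfpkt_set_prio() helper before handing
 * it to the lower layer. The transmit wrapper itself is hypothetical;
 * TC_PRIO_CONTROL comes from <linux/pkt_sched.h>.
 */
static int my_caif_xmit_sketch(struct cflayer *dn, struct sk_buff *skb)
{
	struct cfpkt *pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);

	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
	return dn->transmit(dn, pkt);
}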
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index b8374321b36..b5b020f3c72 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -8,5 +8,5 @@
#define CFSERL_H_
#include <net/caif/caif_layer.h>
-struct cflayer *cfserl_create(int type, int instance, bool use_stx);
-#endif /* CFSERL_H_ */
+struct cflayer *cfserl_create(int instance, bool use_stx);
+#endif
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index b1fa87ee099..cd47705c2cc 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson AB 2010
- * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
@@ -10,6 +10,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/kref.h>
+#include <linux/rculist.h>
struct cfsrvl {
struct cflayer layer;
@@ -17,12 +18,13 @@ struct cfsrvl {
bool phy_flow_on;
bool modem_flow_on;
bool supports_flowctrl;
- void (*release)(struct kref *);
+ void (*release)(struct cflayer *layer);
struct dev_info dev_info;
- struct kref ref;
+ void (*hold)(struct cflayer *lyr);
+ void (*put)(struct cflayer *lyr);
+ struct rcu_head rcu;
};
-void cfsrvl_release(struct kref *kref);
struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
@@ -30,8 +32,12 @@ struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
int mtu_size);
struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
+
+void cfsrvl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
-void cfservl_destroy(struct cflayer *layer);
+
void cfsrvl_init(struct cfsrvl *service,
u8 channel_id,
struct dev_info *dev_info,
@@ -41,23 +47,19 @@ u8 cfsrvl_getphyid(struct cflayer *layer);
static inline void cfsrvl_get(struct cflayer *layr)
{
- struct cfsrvl *s;
- if (layr == NULL)
+ struct cfsrvl *s = container_of(layr, struct cfsrvl, layer);
+ if (layr == NULL || layr->up == NULL || s->hold == NULL)
return;
- s = container_of(layr, struct cfsrvl, layer);
- kref_get(&s->ref);
+
+ s->hold(layr->up);
}
static inline void cfsrvl_put(struct cflayer *layr)
{
- struct cfsrvl *s;
- if (layr == NULL)
+ struct cfsrvl *s = container_of(layr, struct cfsrvl, layer);
+ if (layr == NULL || layr->up == NULL || s->put == NULL)
return;
- s = container_of(layr, struct cfsrvl, layer);
- WARN_ON(!s->release);
- if (s->release)
- kref_put(&s->ref, s->release);
+ s->put(layr->up);
}
-
#endif /* CFSRVL_H_ */
diff --git a/include/net/cfg80211-wext.h b/include/net/cfg80211-wext.h
new file mode 100644
index 00000000000..25baddc4fbe
--- /dev/null
+++ b/include/net/cfg80211-wext.h
@@ -0,0 +1,55 @@
+#ifndef __NET_CFG80211_WEXT_H
+#define __NET_CFG80211_WEXT_H
+/*
+ * 802.11 device and configuration interface -- wext handlers
+ *
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+/*
+ * Temporary wext handlers & helper functions
+ *
+ * These are used only by drivers that aren't yet fully
+ * converted to cfg80211.
+ */
+int cfg80211_wext_giwname(struct net_device *dev,
+ struct iw_request_info *info,
+ char *name, char *extra);
+int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
+ u32 *mode, char *extra);
+int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
+ u32 *mode, char *extra);
+int cfg80211_wext_siwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int cfg80211_wext_giwscan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_giwrange(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_siwrts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra);
+int cfg80211_wext_giwrts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra);
+int cfg80211_wext_siwfrag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra);
+int cfg80211_wext_giwfrag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra);
+int cfg80211_wext_giwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra);
+
+#endif /* __NET_CFG80211_WEXT_H */
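/*
 * Editorial sketch (not part of the patch): a driver that has not been
 * fully converted to cfg80211 can point individual wext handlers at these
 * helpers. The IW_IOCTL() macro and the handler table name are hypothetical;
 * the casts mirror how existing drivers register iw_handler entries.
 */
#define IW_IOCTL(cmd)	[(cmd) - SIOCIWFIRST]

static const iw_handler my_wext_handlers[] = {
	IW_IOCTL(SIOCGIWNAME)	= (iw_handler) cfg80211_wext_giwname,
	IW_IOCTL(SIOCGIWSCAN)	= (iw_handler) cfg80211_wext_giwscan,
	IW_IOCTL(SIOCGIWRANGE)	= (iw_handler) cfg80211_wext_giwrange,
};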
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0663945cfa4..e46c437944f 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -13,18 +13,15 @@
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/list.h>
+#include <linux/bug.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/nl80211.h>
#include <linux/if_ether.h>
#include <linux/ieee80211.h>
+#include <linux/net.h>
#include <net/regulatory.h>
-/* remove once we remove the wext stuff */
-#include <net/iw_handler.h>
-#include <linux/wireless.h>
-
-
/**
* DOC: Introduction
*
@@ -62,6 +59,8 @@
* structures here describe these capabilities in detail.
*/
+struct wiphy;
+
/*
* wireless hardware capability structures
*/
@@ -74,11 +73,13 @@
*
* @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
* @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
+ * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
* @IEEE80211_NUM_BANDS: number of defined bands
*/
enum ieee80211_band {
IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
+ IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
/* keep last */
IEEE80211_NUM_BANDS
@@ -90,27 +91,54 @@ enum ieee80211_band {
* Channel flags set by the regulatory control code.
*
* @IEEE80211_CHAN_DISABLED: This channel is disabled.
- * @IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted
- * on this channel.
- * @IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel.
+ * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
+ * sending probe requests or beaconing.
* @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
* is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
* is not permitted.
+ * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
+ * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
+ * this flag indicates that an 80 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band,
+ * this flag indicates that a 160 MHz channel cannot use this
+ * channel as the control or any of the secondary channels.
+ * This may be due to the driver or due to regulatory bandwidth
+ * restrictions.
+ * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
+ * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
+ * on this channel.
+ * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
+ * on this channel.
+ *
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
- IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
- IEEE80211_CHAN_NO_IBSS = 1<<2,
+ IEEE80211_CHAN_NO_IR = 1<<1,
+ /* hole at 1<<2 */
IEEE80211_CHAN_RADAR = 1<<3,
IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ IEEE80211_CHAN_NO_OFDM = 1<<6,
+ IEEE80211_CHAN_NO_80MHZ = 1<<7,
+ IEEE80211_CHAN_NO_160MHZ = 1<<8,
+ IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
+ IEEE80211_CHAN_GO_CONCURRENT = 1<<10,
+ IEEE80211_CHAN_NO_20MHZ = 1<<11,
+ IEEE80211_CHAN_NO_10MHZ = 1<<12,
};
#define IEEE80211_CHAN_NO_HT40 \
(IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+#define IEEE80211_DFS_MIN_CAC_TIME_MS 60000
+#define IEEE80211_DFS_MIN_NOP_TIME_MS (30 * 60 * 1000)
+
/**
* struct ieee80211_channel - channel definition
*
@@ -125,11 +153,16 @@ enum ieee80211_channel_flags {
* @band: band this channel belongs to.
* @max_antenna_gain: maximum antenna gain in dBi
* @max_power: maximum transmission power (in dBm)
+ * @max_reg_power: maximum regulatory transmission power (in dBm)
* @beacon_found: helper to regulatory code to indicate when a beacon
* has been found on this channel. Use regulatory_hint_found_beacon()
* to enable this, this is useful only on 5 GHz band.
* @orig_mag: internal use
* @orig_mpwr: internal use
+ * @dfs_state: current state of this channel. Only relevant if radar is required
+ * on this channel.
+ * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
+ * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
*/
struct ieee80211_channel {
enum ieee80211_band band;
@@ -138,9 +171,13 @@ struct ieee80211_channel {
u32 flags;
int max_antenna_gain;
int max_power;
+ int max_reg_power;
bool beacon_found;
u32 orig_flags;
int orig_mag, orig_mpwr;
+ enum nl80211_dfs_state dfs_state;
+ unsigned long dfs_state_entered;
+ unsigned int dfs_cac_ms;
};
/**
@@ -163,6 +200,8 @@ struct ieee80211_channel {
* when used with 802.11g (on the 2.4 GHz band); filled by the
* core code when registering the wiphy.
* @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode.
+ * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode
+ * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
*/
enum ieee80211_rate_flags {
IEEE80211_RATE_SHORT_PREAMBLE = 1<<0,
@@ -170,6 +209,8 @@ enum ieee80211_rate_flags {
IEEE80211_RATE_MANDATORY_B = 1<<2,
IEEE80211_RATE_MANDATORY_G = 1<<3,
IEEE80211_RATE_ERP_G = 1<<4,
+ IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5,
+ IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6,
};
/**
@@ -213,6 +254,22 @@ struct ieee80211_sta_ht_cap {
};
/**
+ * struct ieee80211_sta_vht_cap - STA's VHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ac VHT capabilities for an STA.
+ *
+ * @vht_supported: is VHT supported by the STA
+ * @cap: VHT capabilities map as described in 802.11ac spec
+ * @vht_mcs: Supported VHT MCS rates
+ */
+struct ieee80211_sta_vht_cap {
+ bool vht_supported;
+ u32 cap; /* use IEEE80211_VHT_CAP_ */
+ struct ieee80211_vht_mcs_info vht_mcs;
+};
+
+/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
@@ -227,6 +284,7 @@ struct ieee80211_sta_ht_cap {
* rates" IE, i.e. CCK rates first, then OFDM.
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
+ * @vht_cap: VHT capabilities in this band
*/
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
@@ -235,6 +293,7 @@ struct ieee80211_supported_band {
int n_channels;
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
};
/*
@@ -258,14 +317,14 @@ struct ieee80211_supported_band {
/**
* struct vif_params - describes virtual interface parameters
- * @mesh_id: mesh ID to use
- * @mesh_id_len: length of the mesh ID
* @use_4addr: use 4-address frames
+ * @macaddr: address to use for this virtual interface. This will only
+ * be used for non-netdevice interfaces. If this parameter is set
+ * to the zero address, the driver may determine the address as needed.
*/
struct vif_params {
- u8 *mesh_id;
- int mesh_id_len;
int use_4addr;
+ u8 macaddr[ETH_ALEN];
};
/**
@@ -282,14 +341,177 @@ struct vif_params {
* @seq_len: length of @seq.
*/
struct key_params {
- u8 *key;
- u8 *seq;
+ const u8 *key;
+ const u8 *seq;
int key_len;
int seq_len;
u32 cipher;
};
/**
+ * struct cfg80211_chan_def - channel definition
+ * @chan: the (control) channel
+ * @width: channel width
+ * @center_freq1: center frequency of first segment
+ * @center_freq2: center frequency of second segment
+ * (only with 80+80 MHz)
+ */
+struct cfg80211_chan_def {
+ struct ieee80211_channel *chan;
+ enum nl80211_chan_width width;
+ u32 center_freq1;
+ u32 center_freq2;
+};
+
+/**
+ * cfg80211_get_chandef_type - return old channel type from chandef
+ * @chandef: the channel definition
+ *
+ * Return: The old channel type (NOHT, HT20, HT40+/-) from a given
+ * chandef, which must have a bandwidth allowing this conversion.
+ */
+static inline enum nl80211_channel_type
+cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return NL80211_CHAN_NO_HT;
+ case NL80211_CHAN_WIDTH_20:
+ return NL80211_CHAN_HT20;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ return NL80211_CHAN_HT40PLUS;
+ return NL80211_CHAN_HT40MINUS;
+ default:
+ WARN_ON(1);
+ return NL80211_CHAN_NO_HT;
+ }
+}
+
+/**
+ * cfg80211_chandef_create - create channel definition using channel type
+ * @chandef: the channel definition struct to fill
+ * @channel: the control channel
+ * @chantype: the channel type
+ *
+ * Given a channel type, create a channel definition.
+ */
+void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type chantype);
+
+/**
+ * cfg80211_chandef_identical - check if two channel definitions are identical
+ * @chandef1: first channel definition
+ * @chandef2: second channel definition
+ *
+ * Return: %true if the channels defined by the channel definitions are
+ * identical, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
+ const struct cfg80211_chan_def *chandef2)
+{
+ return (chandef1->chan == chandef2->chan &&
+ chandef1->width == chandef2->width &&
+ chandef1->center_freq1 == chandef2->center_freq1 &&
+ chandef1->center_freq2 == chandef2->center_freq2);
+}
+
+/**
+ * cfg80211_chandef_compatible - check if two channel definitions are compatible
+ * @chandef1: first channel definition
+ * @chandef2: second channel definition
+ *
+ * Return: %NULL if the given channel definitions are incompatible,
+ * chandef1 or chandef2 otherwise.
+ */
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
+ const struct cfg80211_chan_def *chandef2);
+
+/**
+ * cfg80211_chandef_valid - check if a channel definition is valid
+ * @chandef: the channel definition to check
+ * Return: %true if the channel definition is valid. %false otherwise.
+ */
+bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_chandef_usable - check if secondary channels can be used
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * @prohibited_flags: the regulatory channel flags that must not be set
+ * Return: %true if secondary channels are usable. %false otherwise.
+ */
+bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags);
+
+/**
+ * cfg80211_chandef_dfs_required - checks if radar detection is required
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * @iftype: the interface type as specified in &enum nl80211_iftype
+ * Returns:
+ * 1 if radar detection is required, 0 if it is not, < 0 on error
+ */
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype);
+
+/**
+ * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ *
+ * In some channel types, not all rates may be used - for example CCK
+ * rates may not be used in 5/10 MHz channels.
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: rate flags which apply for this channel
+ */
+static inline enum ieee80211_rate_flags
+ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return IEEE80211_RATE_SUPPORTS_5MHZ;
+ case NL80211_CHAN_WIDTH_10:
+ return IEEE80211_RATE_SUPPORTS_10MHZ;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * ieee80211_chandef_max_power - maximum transmission power for the chandef
+ *
+ * In some regulations, the transmit power may depend on the configured channel
+ * bandwidth which may be defined as dBm/MHz. This function returns the actual
+ * max_power for non-standard (20 MHz) channels.
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: maximum allowed transmission power in dBm for the chandef
+ */
+static inline int
+ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return min(chandef->chan->max_reg_power - 6,
+ chandef->chan->max_power);
+ case NL80211_CHAN_WIDTH_10:
+ return min(chandef->chan->max_reg_power - 3,
+ chandef->chan->max_power);
+ default:
+ break;
+ }
+ return chandef->chan->max_power;
+}
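/*
 * Editorial sketch (not part of the patch): building a chandef from an
 * old-style channel type and querying the helpers above. The chan argument
 * is assumed to come from the wiphy's band data; my_chandef_example() is
 * hypothetical.
 */
static void my_chandef_example(struct ieee80211_channel *chan)
{
	struct cfg80211_chan_def chandef;

	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT40PLUS);
	WARN_ON(cfg80211_get_chandef_type(&chandef) != NL80211_CHAN_HT40PLUS);

	/* 20/40 MHz widths take no dBm/MHz derating in the helper above */
	pr_debug("tx power limit: %d dBm\n",
		 ieee80211_chandef_max_power(&chandef));
}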
+
+/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -319,7 +541,7 @@ enum survey_info_flags {
* @channel: the channel this survey record reports, mandatory
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
- * optional
+ * optional
* @channel_time: amount of time in ms the radio spent on the channel
* @channel_time_busy: amount of time the primary channel was sensed busy
* @channel_time_ext_busy: amount of time the extension channel was sensed busy
@@ -343,36 +565,170 @@ struct survey_info {
};
/**
- * struct beacon_parameters - beacon parameters
- *
- * Used to configure the beacon for an interface.
- *
+ * struct cfg80211_crypto_settings - Crypto settings
+ * @wpa_versions: indicates which, if any, WPA versions are enabled
+ * (from enum nl80211_wpa_versions)
+ * @cipher_group: group key cipher suite (or 0 if unset)
+ * @n_ciphers_pairwise: number of AP supported unicast ciphers
+ * @ciphers_pairwise: unicast key cipher suites
+ * @n_akm_suites: number of AKM suites
+ * @akm_suites: AKM suites
+ * @control_port: Whether user space controls IEEE 802.1X port, i.e.,
+ * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
+ * required to assume that the port is unauthorized until authorized by
+ * user space. Otherwise, port is marked authorized by default.
+ * @control_port_ethertype: the control port protocol that should be
+ * allowed through even on unauthorized ports
+ * @control_port_no_encrypt: TRUE to prevent encryption of control port
+ * protocol frames.
+ */
+struct cfg80211_crypto_settings {
+ u32 wpa_versions;
+ u32 cipher_group;
+ int n_ciphers_pairwise;
+ u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
+ int n_akm_suites;
+ u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
+ bool control_port;
+ __be16 control_port_ethertype;
+ bool control_port_no_encrypt;
+};
+
+/**
+ * struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
- * or %NULL if not changed
+ * or %NULL if not changed
* @tail: tail portion of beacon (after TIM IE)
- * or %NULL if not changed
- * @interval: beacon interval or zero if not changed
- * @dtim_period: DTIM period or zero if not changed
+ * or %NULL if not changed
* @head_len: length of @head
* @tail_len: length of @tail
+ * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
+ * @beacon_ies_len: length of beacon_ies in octets
+ * @proberesp_ies: extra information element(s) to add into Probe Response
+ * frames or %NULL
+ * @proberesp_ies_len: length of proberesp_ies in octets
+ * @assocresp_ies: extra information element(s) to add into (Re)Association
+ * Response frames or %NULL
+ * @assocresp_ies_len: length of assocresp_ies in octets
+ * @probe_resp_len: length of probe response template (@probe_resp)
+ * @probe_resp: probe response template (AP mode only)
+ */
+struct cfg80211_beacon_data {
+ const u8 *head, *tail;
+ const u8 *beacon_ies;
+ const u8 *proberesp_ies;
+ const u8 *assocresp_ies;
+ const u8 *probe_resp;
+
+ size_t head_len, tail_len;
+ size_t beacon_ies_len;
+ size_t proberesp_ies_len;
+ size_t assocresp_ies_len;
+ size_t probe_resp_len;
+};
+
+struct mac_address {
+ u8 addr[ETH_ALEN];
+};
+
+/**
+ * struct cfg80211_acl_data - Access control list data
+ *
+ * @acl_policy: ACL policy to be applied on the station's
+ * entry specified by mac_addr
+ * @n_acl_entries: Number of MAC address entries passed
+ * @mac_addrs: List of MAC addresses of stations to be used for ACL
+ */
+struct cfg80211_acl_data {
+ enum nl80211_acl_policy acl_policy;
+ int n_acl_entries;
+
+ /* Keep it last */
+ struct mac_address mac_addrs[];
+};
+
+/**
+ * struct cfg80211_ap_settings - AP configuration
+ *
+ * Used to configure an AP interface.
+ *
+ * @chandef: defines the channel to use
+ * @beacon: beacon data
+ * @beacon_interval: beacon interval
+ * @dtim_period: DTIM period
+ * @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from
+ * user space)
+ * @ssid_len: length of @ssid
+ * @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames
+ * @crypto: crypto settings
+ * @privacy: the BSS uses privacy
+ * @auth_type: Authentication type (algorithm)
+ * @inactivity_timeout: time in seconds to determine station's inactivity.
+ * @p2p_ctwindow: P2P CT Window
+ * @p2p_opp_ps: P2P opportunistic PS
+ * @acl: ACL configuration used by drivers which have support for
+ * MAC address based access control
+ */
+struct cfg80211_ap_settings {
+ struct cfg80211_chan_def chandef;
+
+ struct cfg80211_beacon_data beacon;
+
+ int beacon_interval, dtim_period;
+ const u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_hidden_ssid hidden_ssid;
+ struct cfg80211_crypto_settings crypto;
+ bool privacy;
+ enum nl80211_auth_type auth_type;
+ int inactivity_timeout;
+ u8 p2p_ctwindow;
+ bool p2p_opp_ps;
+ const struct cfg80211_acl_data *acl;
+};
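/*
 * Editorial sketch (not part of the patch): the fields of
 * struct cfg80211_ap_settings a driver's ->start_ap() typically consumes.
 * my_hw_start_beaconing() is hypothetical.
 */
static int my_hw_start_beaconing(const u8 *head, size_t head_len,
				 int beacon_int, int dtim_period);

static int my_start_ap_sketch(struct cfg80211_ap_settings *settings)
{
	if (settings->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
		return -EOPNOTSUPP;	/* e.g. hardware cannot hide the SSID */

	return my_hw_start_beaconing(settings->beacon.head,
				     settings->beacon.head_len,
				     settings->beacon_interval,
				     settings->dtim_period);
}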
+
+/**
+ * struct cfg80211_csa_settings - channel switch settings
+ *
+ * Used for channel switch
+ *
+ * @chandef: defines the channel to use after the switch
+ * @beacon_csa: beacon data while performing the switch
+ * @counter_offsets_beacon: offsets of the counters within the beacon (tail)
+ * @counter_offsets_presp: offsets of the counters within the probe response
+ * @n_counter_offsets_beacon: number of csa counters in the beacon (tail)
+ * @n_counter_offsets_presp: number of csa counters in the probe response
+ * @beacon_after: beacon data to be used on the new channel
+ * @radar_required: whether radar detection is required on the new channel
+ * @block_tx: whether transmissions should be blocked while changing
+ * @count: number of beacons until switch
*/
-struct beacon_parameters {
- u8 *head, *tail;
- int interval, dtim_period;
- int head_len, tail_len;
+struct cfg80211_csa_settings {
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_beacon_data beacon_csa;
+ const u16 *counter_offsets_beacon;
+ const u16 *counter_offsets_presp;
+ unsigned int n_counter_offsets_beacon;
+ unsigned int n_counter_offsets_presp;
+ struct cfg80211_beacon_data beacon_after;
+ bool radar_required;
+ bool block_tx;
+ u8 count;
};
/**
- * enum plink_action - actions to perform in mesh peers
+ * enum station_parameters_apply_mask - station parameter values to apply
+ * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
+ * @STATION_PARAM_APPLY_CAPABILITY: apply new capability
+ * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
*
- * @PLINK_ACTION_INVALID: action 0 is reserved
- * @PLINK_ACTION_OPEN: start mesh peer link establishment
- * @PLINK_ACTION_BLOCK: block traffic from this mesh peer
+ * Not all station parameters have in-band "no change" signalling,
+ * for those that don't, these flags are used.
*/
-enum plink_actions {
- PLINK_ACTION_INVALID,
- PLINK_ACTION_OPEN,
- PLINK_ACTION_BLOCK,
+enum station_parameters_apply_mask {
+ STATION_PARAM_APPLY_UAPSD = BIT(0),
+ STATION_PARAM_APPLY_CAPABILITY = BIT(1),
+ STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
};
/**
@@ -391,20 +747,98 @@ enum plink_actions {
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
* @plink_action: plink action to take
+ * @plink_state: set the peer link state for a station
* @ht_capa: HT capabilities of station
+ * @vht_capa: VHT capabilities of station
+ * @uapsd_queues: bitmap of queues configured for uapsd. same format
+ * as the AC bitmap in the QoS info field
+ * @max_sp: max Service Period. same format as the MAX_SP in the
+ * QoS info field (but already shifted down)
+ * @sta_modify_mask: bitmap indicating which parameters changed
+ * (for those that don't have a natural "no change" value),
+ * see &enum station_parameters_apply_mask
+ * @local_pm: local link-specific mesh power save mode (no change when set
+ * to unknown)
+ * @capability: station capability
+ * @ext_capab: extended capabilities of the station
+ * @ext_capab_len: number of extended capabilities
+ * @supported_channels: supported channels in IEEE 802.11 format
+ * @supported_channels_len: number of supported channels
+ * @supported_oper_classes: supported oper classes in IEEE 802.11 format
+ * @supported_oper_classes_len: number of supported operating classes
+ * @opmode_notif: operating mode field from Operating Mode Notification
+ * @opmode_notif_used: information if operating mode field is used
*/
struct station_parameters {
- u8 *supported_rates;
+ const u8 *supported_rates;
struct net_device *vlan;
u32 sta_flags_mask, sta_flags_set;
+ u32 sta_modify_mask;
int listen_interval;
u16 aid;
u8 supported_rates_len;
u8 plink_action;
- struct ieee80211_ht_cap *ht_capa;
+ u8 plink_state;
+ const struct ieee80211_ht_cap *ht_capa;
+ const struct ieee80211_vht_cap *vht_capa;
+ u8 uapsd_queues;
+ u8 max_sp;
+ enum nl80211_mesh_power_mode local_pm;
+ u16 capability;
+ const u8 *ext_capab;
+ u8 ext_capab_len;
+ const u8 *supported_channels;
+ u8 supported_channels_len;
+ const u8 *supported_oper_classes;
+ u8 supported_oper_classes_len;
+ u8 opmode_notif;
+ bool opmode_notif_used;
+};
+
+/**
+ * enum cfg80211_station_type - the type of station being modified
+ * @CFG80211_STA_AP_CLIENT: client of an AP interface
+ * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
+ * the AP MLME in the device
+ * @CFG80211_STA_AP_STA: AP station on managed interface
+ * @CFG80211_STA_IBSS: IBSS station
+ * @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry
+ * while TDLS setup is in progress, it moves out of this state when
+ * being marked authorized; use this only if TDLS with external setup is
+ * supported/used)
+ * @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active
+ * entry that is operating, has been marked authorized by userspace)
+ * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed)
+ * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed)
+ */
+enum cfg80211_station_type {
+ CFG80211_STA_AP_CLIENT,
+ CFG80211_STA_AP_MLME_CLIENT,
+ CFG80211_STA_AP_STA,
+ CFG80211_STA_IBSS,
+ CFG80211_STA_TDLS_PEER_SETUP,
+ CFG80211_STA_TDLS_PEER_ACTIVE,
+ CFG80211_STA_MESH_PEER_KERNEL,
+ CFG80211_STA_MESH_PEER_USER,
};
/**
+ * cfg80211_check_station_change - validate parameter changes
+ * @wiphy: the wiphy this operates on
+ * @params: the new parameters for a station
+ * @statype: the type of station being modified
+ *
+ * Utility function for the @change_station driver method. Call this function
+ * with the appropriate station type after looking up the station (and checking
+ * that it exists). It will verify whether the station change is acceptable, and if
+ * not will return an error code. Note that it may modify the parameters for
+ * backward compatibility reasons, so don't use them before calling this.
+ */
+int cfg80211_check_station_change(struct wiphy *wiphy,
+ struct station_parameters *params,
+ enum cfg80211_station_type statype);
+
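A hedged sketch of a driver's .change_station callback built around this
function; drv_find_sta() and drv_apply_sta_params() (sketched above) are
hypothetical driver helpers:

	static int drv_change_station(struct wiphy *wiphy, struct net_device *dev,
				      const u8 *mac,
				      struct station_parameters *params)
	{
		struct drv_sta *sta = drv_find_sta(dev, mac);
		int err;

		if (!sta)
			return -ENOENT;

		/* cfg80211 validates (and may adjust) the parameters */
		err = cfg80211_check_station_change(wiphy, params,
						    CFG80211_STA_AP_CLIENT);
		if (err)
			return err;

		drv_apply_sta_params(sta, params);
		return 0;
	}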
+/**
* enum station_info_flags - station information flags
*
* Used by the driver to indicate which info in &struct station_info
@@ -413,32 +847,64 @@ struct station_parameters {
* @STATION_INFO_INACTIVE_TIME: @inactive_time filled
* @STATION_INFO_RX_BYTES: @rx_bytes filled
* @STATION_INFO_TX_BYTES: @tx_bytes filled
+ * @STATION_INFO_RX_BYTES64: @rx_bytes filled with 64-bit value
+ * @STATION_INFO_TX_BYTES64: @tx_bytes filled with 64-bit value
* @STATION_INFO_LLID: @llid filled
* @STATION_INFO_PLID: @plid filled
* @STATION_INFO_PLINK_STATE: @plink_state filled
* @STATION_INFO_SIGNAL: @signal filled
- * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled
- * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
- * @STATION_INFO_RX_PACKETS: @rx_packets filled
- * @STATION_INFO_TX_PACKETS: @tx_packets filled
+ * @STATION_INFO_TX_BITRATE: @txrate fields are filled
+ * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
+ * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
+ * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
* @STATION_INFO_TX_RETRIES: @tx_retries filled
* @STATION_INFO_TX_FAILED: @tx_failed filled
* @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
+ * @STATION_INFO_SIGNAL_AVG: @signal_avg filled
+ * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
+ * @STATION_INFO_BSS_PARAM: @bss_param filled
+ * @STATION_INFO_CONNECTED_TIME: @connected_time filled
+ * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
+ * @STATION_INFO_STA_FLAGS: @sta_flags filled
+ * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
+ * @STATION_INFO_T_OFFSET: @t_offset filled
+ * @STATION_INFO_LOCAL_PM: @local_pm filled
+ * @STATION_INFO_PEER_PM: @peer_pm filled
+ * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled
+ * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled
+ * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled
+ * @STATION_INFO_EXPECTED_THROUGHPUT: @expected_throughput filled
*/
enum station_info_flags {
- STATION_INFO_INACTIVE_TIME = 1<<0,
- STATION_INFO_RX_BYTES = 1<<1,
- STATION_INFO_TX_BYTES = 1<<2,
- STATION_INFO_LLID = 1<<3,
- STATION_INFO_PLID = 1<<4,
- STATION_INFO_PLINK_STATE = 1<<5,
- STATION_INFO_SIGNAL = 1<<6,
- STATION_INFO_TX_BITRATE = 1<<7,
- STATION_INFO_RX_PACKETS = 1<<8,
- STATION_INFO_TX_PACKETS = 1<<9,
- STATION_INFO_TX_RETRIES = 1<<10,
- STATION_INFO_TX_FAILED = 1<<11,
- STATION_INFO_RX_DROP_MISC = 1<<12,
+ STATION_INFO_INACTIVE_TIME = BIT(0),
+ STATION_INFO_RX_BYTES = BIT(1),
+ STATION_INFO_TX_BYTES = BIT(2),
+ STATION_INFO_LLID = BIT(3),
+ STATION_INFO_PLID = BIT(4),
+ STATION_INFO_PLINK_STATE = BIT(5),
+ STATION_INFO_SIGNAL = BIT(6),
+ STATION_INFO_TX_BITRATE = BIT(7),
+ STATION_INFO_RX_PACKETS = BIT(8),
+ STATION_INFO_TX_PACKETS = BIT(9),
+ STATION_INFO_TX_RETRIES = BIT(10),
+ STATION_INFO_TX_FAILED = BIT(11),
+ STATION_INFO_RX_DROP_MISC = BIT(12),
+ STATION_INFO_SIGNAL_AVG = BIT(13),
+ STATION_INFO_RX_BITRATE = BIT(14),
+ STATION_INFO_BSS_PARAM = BIT(15),
+ STATION_INFO_CONNECTED_TIME = BIT(16),
+ STATION_INFO_ASSOC_REQ_IES = BIT(17),
+ STATION_INFO_STA_FLAGS = BIT(18),
+ STATION_INFO_BEACON_LOSS_COUNT = BIT(19),
+ STATION_INFO_T_OFFSET = BIT(20),
+ STATION_INFO_LOCAL_PM = BIT(21),
+ STATION_INFO_PEER_PM = BIT(22),
+ STATION_INFO_NONPEER_PM = BIT(23),
+ STATION_INFO_RX_BYTES64 = BIT(24),
+ STATION_INFO_TX_BYTES64 = BIT(25),
+ STATION_INFO_CHAIN_SIGNAL = BIT(26),
+ STATION_INFO_CHAIN_SIGNAL_AVG = BIT(27),
+ STATION_INFO_EXPECTED_THROUGHPUT = BIT(28),
};
/**
@@ -447,14 +913,24 @@ enum station_info_flags {
* Used by the driver to indicate the specific rate transmission
* type for 802.11n transmissions.
*
- * @RATE_INFO_FLAGS_MCS: @tx_bitrate_mcs filled
- * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 Mhz width transmission
+ * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
+ * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
+ * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 MHz width transmission
+ * @RATE_INFO_FLAGS_80_MHZ_WIDTH: 80 MHz width transmission
+ * @RATE_INFO_FLAGS_80P80_MHZ_WIDTH: 80+80 MHz width transmission
+ * @RATE_INFO_FLAGS_160_MHZ_WIDTH: 160 MHz width transmission
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
+ * @RATE_INFO_FLAGS_60G: 60GHz MCS
*/
enum rate_info_flags {
- RATE_INFO_FLAGS_MCS = 1<<0,
- RATE_INFO_FLAGS_40_MHZ_WIDTH = 1<<1,
- RATE_INFO_FLAGS_SHORT_GI = 1<<2,
+ RATE_INFO_FLAGS_MCS = BIT(0),
+ RATE_INFO_FLAGS_VHT_MCS = BIT(1),
+ RATE_INFO_FLAGS_40_MHZ_WIDTH = BIT(2),
+ RATE_INFO_FLAGS_80_MHZ_WIDTH = BIT(3),
+ RATE_INFO_FLAGS_80P80_MHZ_WIDTH = BIT(4),
+ RATE_INFO_FLAGS_160_MHZ_WIDTH = BIT(5),
+ RATE_INFO_FLAGS_SHORT_GI = BIT(6),
+ RATE_INFO_FLAGS_60G = BIT(7),
};
/**
@@ -465,57 +941,153 @@ enum rate_info_flags {
* @flags: bitflag of flags from &enum rate_info_flags
* @mcs: mcs index if struct describes a 802.11n bitrate
* @legacy: bitrate in 100kbit/s for 802.11abg
+ * @nss: number of streams (VHT only)
*/
struct rate_info {
u8 flags;
u8 mcs;
u16 legacy;
+ u8 nss;
};
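For illustration, a VHT 80 MHz, 2-stream MCS 9 rate with short GI would be
described as (the values are only an example):

	struct rate_info ri = {
		.flags = RATE_INFO_FLAGS_VHT_MCS |
			 RATE_INFO_FLAGS_80_MHZ_WIDTH |
			 RATE_INFO_FLAGS_SHORT_GI,
		.mcs = 9,
		.nss = 2,
	};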
/**
+ * enum bss_param_flags - BSS parameter flags
+ *
+ * Used by the driver in the @flags member of &struct sta_bss_parameters
+ * to indicate which BSS settings are enabled.
+ *
+ * @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled
+ * @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled
+ * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled
+ */
+enum bss_param_flags {
+ BSS_PARAM_FLAGS_CTS_PROT = 1<<0,
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1,
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2,
+};
+
+/**
+ * struct sta_bss_parameters - BSS parameters for the attached station
+ *
+ * Information about the currently associated BSS
+ *
+ * @flags: bitflag of flags from &enum bss_param_flags
+ * @dtim_period: DTIM period for the BSS
+ * @beacon_interval: beacon interval
+ */
+struct sta_bss_parameters {
+ u8 flags;
+ u8 dtim_period;
+ u16 beacon_interval;
+};
+
+#define IEEE80211_MAX_CHAINS 4
+
+/**
* struct station_info - station information
*
* Station information filled by driver for get_station() and dump_station.
*
* @filled: bitflag of flags from &enum station_info_flags
+ * @connected_time: time (in seconds) since the station last connected
* @inactive_time: time since last station activity (tx/rx) in milliseconds
* @rx_bytes: bytes received from this station
* @tx_bytes: bytes transmitted to this station
* @llid: mesh local link id
* @plid: mesh peer link id
* @plink_state: mesh peer link state
- * @signal: signal strength of last received packet in dBm
- * @txrate: current unicast bitrate to this station
+ * @signal: The signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
+ * @chain_signal: per-chain signal strength of last received packet in dBm
+ * @chain_signal_avg: per-chain signal strength average in dBm
+ * @txrate: current unicast bitrate from this station
+ * @rxrate: current unicast bitrate to this station
* @rx_packets: packets received from this station
* @tx_packets: packets transmitted to this station
* @tx_retries: cumulative retry counts
* @tx_failed: number of failed transmissions (retries exceeded, no ACK)
* @rx_dropped_misc: Dropped for un-specified reason.
+ * @bss_param: current BSS parameters
* @generation: generation number for nl80211 dumps.
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @assoc_req_ies: IEs from (Re)Association Request.
+ * This is used only when in AP mode with drivers that do not use
+ * user space MLME/SME implementation. The information is provided for
+ * the cfg80211_new_sta() calls to notify user space of the IEs.
+ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
+ * @sta_flags: station flags mask & values
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
+ * @t_offset: Time offset of the station relative to this host.
+ * @local_pm: local mesh STA power save mode
+ * @peer_pm: peer mesh STA power save mode
+ * @nonpeer_pm: non-peer mesh STA power save mode
+ * @expected_throughput: expected throughput in kbps (including 802.11 headers)
+ * towards this station.
*/
struct station_info {
u32 filled;
+ u32 connected_time;
u32 inactive_time;
- u32 rx_bytes;
- u32 tx_bytes;
+ u64 rx_bytes;
+ u64 tx_bytes;
u16 llid;
u16 plid;
u8 plink_state;
s8 signal;
+ s8 signal_avg;
+
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
+
struct rate_info txrate;
+ struct rate_info rxrate;
u32 rx_packets;
u32 tx_packets;
u32 tx_retries;
u32 tx_failed;
u32 rx_dropped_misc;
+ struct sta_bss_parameters bss_param;
+ struct nl80211_sta_flag_update sta_flags;
int generation;
+
+ const u8 *assoc_req_ies;
+ size_t assoc_req_ies_len;
+
+ u32 beacon_loss_count;
+ s64 t_offset;
+ enum nl80211_mesh_power_mode local_pm;
+ enum nl80211_mesh_power_mode peer_pm;
+ enum nl80211_mesh_power_mode nonpeer_pm;
+
+ u32 expected_throughput;
+
+ /*
+ * Note: Add a new enum station_info_flags value for each new field and
+ * use it to check which fields are initialized.
+ */
};
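A minimal sketch of a driver's .get_station handler filling only the fields it
knows about and flagging them in @filled; drv_find_sta() and the sta members
are hypothetical:

	static int drv_get_station(struct wiphy *wiphy, struct net_device *dev,
				   const u8 *mac, struct station_info *sinfo)
	{
		struct drv_sta *sta = drv_find_sta(dev, mac);

		if (!sta)
			return -ENOENT;

		sinfo->filled = STATION_INFO_SIGNAL | STATION_INFO_TX_PACKETS;
		sinfo->signal = sta->last_rssi;		/* dBm */
		sinfo->tx_packets = sta->tx_pkts;
		return 0;
	}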
/**
+ * cfg80211_get_station - retrieve information about a given station
+ * @dev: the device where the station is supposed to be connected to
+ * @mac_addr: the mac address of the station of interest
+ * @sinfo: pointer to the structure to fill with the information
+ *
+ * Returns 0 on success and sinfo is filled with the available information
+ * otherwise returns a negative error code and the content of sinfo has to be
+ * considered undefined.
+ */
+int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo);
+
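Callers outside the driver can query the same information, e.g. (a sketch;
peer_addr is assumed to hold the station's MAC address):

	struct station_info sinfo = {};

	if (!cfg80211_get_station(dev, peer_addr, &sinfo) &&
	    (sinfo.filled & STATION_INFO_SIGNAL))
		pr_debug("peer signal: %d dBm\n", sinfo.signal);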
+/**
* enum monitor_flags - monitor flags
*
* Monitor interface configuration flags. Note that these must be the bits
@@ -526,6 +1098,7 @@ struct station_info {
* @MONITOR_FLAG_CONTROL: pass control frames
* @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
* @MONITOR_FLAG_COOK_FRAMES: report frames after processing
+ * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
*/
enum monitor_flags {
MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
@@ -533,6 +1106,7 @@ enum monitor_flags {
MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS,
MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES,
+ MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE,
};
/**
@@ -605,39 +1179,166 @@ struct mpath_info {
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
+ * @ht_opmode: HT Operation mode
+ * (u16 = opmode, -1 = do not change)
+ * @p2p_ctwindow: P2P CT Window (-1 = no change)
+ * @p2p_opp_ps: P2P opportunistic PS (-1 = no change)
*/
struct bss_parameters {
int use_cts_prot;
int use_short_preamble;
int use_short_slot_time;
- u8 *basic_rates;
+ const u8 *basic_rates;
u8 basic_rates_len;
int ap_isolate;
+ int ht_opmode;
+ s8 p2p_ctwindow, p2p_opp_ps;
};
+/**
+ * struct mesh_config - 802.11s mesh configuration
+ *
+ * These parameters can be changed while the mesh is active.
+ *
+ * @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used
+ * by the Mesh Peering Open message
+ * @dot11MeshConfirmTimeout: the initial retry timeout in millisecond units
+ * used by the Mesh Peering Open message
+ * @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by
+ * the mesh peering management to close a mesh peering
+ * @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this
+ * mesh interface
+ * @dot11MeshMaxRetries: the maximum number of peer link open retries that can
+ * be sent to establish a new peer link instance in a mesh
+ * @dot11MeshTTL: the value of TTL field set at a source mesh STA
+ * @element_ttl: the value of TTL field set at a mesh STA for path selection
+ * elements
+ * @auto_open_plinks: whether we should automatically open peer links when we
+ * detect compatible mesh peers
+ * @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to
+ * synchronize to for 11s default synchronization method
+ * @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ
+ * that an originator mesh STA can send to a particular path target
+ * @path_refresh_time: how frequently to refresh mesh paths in milliseconds
+ * @min_discovery_timeout: the minimum length of time to wait until giving up on
+ * a path discovery in milliseconds
+ * @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs
+ * receiving a PREQ shall consider the forwarding information from the
+ * root to be valid. (TU = time unit)
+ * @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during
+ * which a mesh STA can send only one action frame containing a PREQ
+ * element
+ * @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during
+ * which a mesh STA can send only one Action frame containing a PERR
+ * element
+ * @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that
+ * it takes for an HWMP information element to propagate across the mesh
+ * @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA
+ * @dot11MeshHWMPRannInterval: the interval of time (in TUs) between root
+ * announcements are transmitted
+ * @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh
+ * station has access to a broader network beyond the MBSS. (This is
+ * misnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true
+ * only means that the station will announce to others it's a mesh gate, but
+ * not necessarily using the gate announcement protocol. Still keeping the
+ * same nomenclature to be in sync with the spec)
+ * @dot11MeshForwarding: whether the Mesh STA is forwarding or non-forwarding
+ * entity (default is TRUE - forwarding entity)
+ * @rssi_threshold: the threshold for average signal strength of candidate
+ * station to establish a peer link
+ * @ht_opmode: mesh HT protection mode
+ *
+ * @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs
+ * receiving a proactive PREQ shall consider the forwarding information to
+ * the root mesh STA to be valid.
+ *
+ * @dot11MeshHWMProotInterval: The interval of time (in TUs) between proactive
+ * PREQs are transmitted.
+ * @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs)
+ * during which a mesh STA can send only one Action frame containing
+ * a PREQ element for root path confirmation.
+ * @power_mode: The default mesh power save mode which will be the initial
+ * setting for new peer links.
+ * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake
+ * after transmitting its beacon.
+ * @plink_timeout: If no tx activity is seen from a STA we've established
+ * peering with for longer than this time (in seconds), then remove it
+ * from the STA's list of peers. Default is 30 minutes.
+ */
struct mesh_config {
- /* Timeouts in ms */
- /* Mesh plink management parameters */
u16 dot11MeshRetryTimeout;
u16 dot11MeshConfirmTimeout;
u16 dot11MeshHoldingTimeout;
u16 dot11MeshMaxPeerLinks;
- u8 dot11MeshMaxRetries;
- u8 dot11MeshTTL;
+ u8 dot11MeshMaxRetries;
+ u8 dot11MeshTTL;
+ u8 element_ttl;
bool auto_open_plinks;
- /* HWMP parameters */
- u8 dot11MeshHWMPmaxPREQretries;
+ u32 dot11MeshNbrOffsetMaxNeighbor;
+ u8 dot11MeshHWMPmaxPREQretries;
u32 path_refresh_time;
u16 min_discovery_timeout;
u32 dot11MeshHWMPactivePathTimeout;
u16 dot11MeshHWMPpreqMinInterval;
+ u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
- u8 dot11MeshHWMPRootMode;
+ u8 dot11MeshHWMPRootMode;
+ u16 dot11MeshHWMPRannInterval;
+ bool dot11MeshGateAnnouncementProtocol;
+ bool dot11MeshForwarding;
+ s32 rssi_threshold;
+ u16 ht_opmode;
+ u32 dot11MeshHWMPactivePathToRootTimeout;
+ u16 dot11MeshHWMProotInterval;
+ u16 dot11MeshHWMPconfirmationInterval;
+ enum nl80211_mesh_power_mode power_mode;
+ u16 dot11MeshAwakeWindowDuration;
+ u32 plink_timeout;
+};
+
+/**
+ * struct mesh_setup - 802.11s mesh setup configuration
+ * @chandef: defines the channel to use
+ * @mesh_id: the mesh ID
+ * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
+ * @sync_method: which synchronization method to use
+ * @path_sel_proto: which path selection protocol to use
+ * @path_metric: which metric to use
+ * @auth_id: which authentication method this mesh is using
+ * @ie: vendor information elements (optional)
+ * @ie_len: length of vendor information elements
+ * @is_authenticated: this mesh requires authentication
+ * @is_secure: this mesh uses security
+ * @user_mpm: userspace handles all MPM functions
+ * @dtim_period: DTIM period to use
+ * @beacon_interval: beacon interval to use
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
+ * @basic_rates: basic rates to use when creating the mesh
+ *
+ * These parameters are fixed when the mesh is created.
+ */
+struct mesh_setup {
+ struct cfg80211_chan_def chandef;
+ const u8 *mesh_id;
+ u8 mesh_id_len;
+ u8 sync_method;
+ u8 path_sel_proto;
+ u8 path_metric;
+ u8 auth_id;
+ const u8 *ie;
+ u8 ie_len;
+ bool is_authenticated;
+ bool is_secure;
+ bool user_mpm;
+ u8 dtim_period;
+ u16 beacon_interval;
+ int mcast_rate[IEEE80211_NUM_BANDS];
+ u32 basic_rates;
};
/**
* struct ieee80211_txq_params - TX queue parameters
- * @queue: TX queue identifier (NL80211_TXQ_Q_*)
+ * @ac: AC identifier
* @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
* @cwmin: Minimum contention window [a value of the form 2^n-1 in the range
* 1..32767]
@@ -646,16 +1347,13 @@ struct mesh_config {
* @aifs: Arbitration interframe space [0..255]
*/
struct ieee80211_txq_params {
- enum nl80211_txq_q queue;
+ enum nl80211_ac ac;
u16 txop;
u16 cwmin;
u16 cwmax;
u8 aifs;
};
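As an illustration, the 802.11 default EDCA parameters for the best-effort AC
would be expressed as:

	struct ieee80211_txq_params txq = {
		.ac = NL80211_AC_BE,
		.aifs = 3,
		.cwmin = 15,
		.cwmax = 1023,
		.txop = 0,	/* TXOP limit disabled */
	};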
-/* from net/wireless.h */
-struct wiphy;
-
/**
* DOC: Scanning and BSS list handling
*
@@ -696,23 +1394,92 @@ struct cfg80211_ssid {
* @n_ssids: number of SSIDs
* @channels: channels to scan on.
* @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
+ * @flags: bit field of flags controlling operation
+ * @rates: bitmap of rates to advertise for each band
* @wiphy: the wiphy this was for
- * @dev: the interface
+ * @scan_start: time (in jiffies) when the scan started
+ * @wdev: the wireless device to scan for
* @aborted: (internal) scan request was notified as aborted
+ * @notified: (internal) scan request was notified as done or aborted
+ * @no_cck: used to send probe requests at non CCK rate in 2GHz band
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
+ u32 flags;
+
+ u32 rates[IEEE80211_NUM_BANDS];
+
+ struct wireless_dev *wdev;
+
+ /* internal */
+ struct wiphy *wiphy;
+ unsigned long scan_start;
+ bool aborted, notified;
+ bool no_cck;
+
+ /* keep last */
+ struct ieee80211_channel *channels[0];
+};
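A hedged sketch of how a driver's .scan handler might walk such a request;
drv_queue_channel() is a hypothetical helper, and cfg80211_scan_done() is the
completion call referred to in the op documentation below:

	static int drv_scan(struct wiphy *wiphy,
			    struct cfg80211_scan_request *req)
	{
		u32 i;

		for (i = 0; i < req->n_channels; i++)
			drv_queue_channel(req->channels[i]);

		/* later, from the completion path:
		 * cfg80211_scan_done(req, false);	false = not aborted
		 */
		return 0;
	}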
+
+/**
+ * struct cfg80211_match_set - sets of attributes to match
+ *
+ * @ssid: SSID to be matched; may be zero-length for no match (RSSI only)
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ */
+struct cfg80211_match_set {
+ struct cfg80211_ssid ssid;
+ s32 rssi_thold;
+};
+
+/**
+ * struct cfg80211_sched_scan_request - scheduled scan request description
+ *
+ * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
+ * @n_ssids: number of SSIDs
+ * @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
+ * @interval: interval between each scheduled scan cycle
+ * @ie: optional information element(s) to add into Probe Request or %NULL
+ * @ie_len: length of ie in octets
+ * @flags: bit field of flags controlling operation
+ * @match_sets: sets of parameters to be matched for a scan result
+ * entry to be considered valid and to be passed to the host
+ * (others are filtered out).
+ * If omitted, all results are passed.
+ * @n_match_sets: number of match sets
+ * @wiphy: the wiphy this was for
+ * @dev: the interface
+ * @scan_start: start time of the scheduled scan
+ * @channels: channels to scan
+ * @min_rssi_thold: for drivers only supporting a single threshold, this
+ * contains the minimum over all matchsets
+ */
+struct cfg80211_sched_scan_request {
+ struct cfg80211_ssid *ssids;
+ int n_ssids;
+ u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
+ u32 interval;
+ const u8 *ie;
+ size_t ie_len;
+ u32 flags;
+ struct cfg80211_match_set *match_sets;
+ int n_match_sets;
+ s32 min_rssi_thold;
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
- bool aborted;
+ unsigned long scan_start;
/* keep last */
struct ieee80211_channel *channels[0];
@@ -732,105 +1499,95 @@ enum cfg80211_signal_type {
};
/**
+ * struct cfg80211_bss_ies - BSS entry IE data
+ * @tsf: TSF contained in the frame that carried these IEs
+ * @rcu_head: internal use, for freeing
+ * @len: length of the IEs
+ * @data: IE data
+ */
+struct cfg80211_bss_ies {
+ u64 tsf;
+ struct rcu_head rcu_head;
+ int len;
+ u8 data[];
+};
+
+/**
* struct cfg80211_bss - BSS description
*
* This structure describes a BSS (which may also be a mesh network)
* for use in scan results and similar.
*
* @channel: channel this BSS is on
+ * @scan_width: width of the control channel
* @bssid: BSSID of the BSS
- * @tsf: timestamp of last received update
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
- * @information_elements: the information elements (Note that there
- * is no guarantee that these are well-formed!); this is a pointer to
- * either the beacon_ies or proberesp_ies depending on whether Probe
- * Response frame has been received
- * @len_information_elements: total length of the information elements
+ * @ies: the information elements (Note that there is no guarantee that these
+ * are well-formed!); this is a pointer to either the beacon_ies or
+ * proberesp_ies depending on whether Probe Response frame has been
+ * received. It is always non-%NULL.
* @beacon_ies: the information elements from the last Beacon frame
- * @len_beacon_ies: total length of the beacon_ies
+ * (implementation note: if @hidden_beacon_bss is set this struct doesn't
+ * own the beacon_ies, but they're just pointers to the ones from the
+ * @hidden_beacon_bss struct)
* @proberesp_ies: the information elements from the last Probe Response frame
- * @len_proberesp_ies: total length of the proberesp_ies
+ * @hidden_beacon_bss: in case this BSS struct represents a probe response from
+ * a BSS that hides the SSID in its beacon, this points to the BSS struct
+ * that holds the beacon data. @beacon_ies is still valid, of course, and
+ * points to the same data as hidden_beacon_bss->beacon_ies in that case.
* @signal: signal strength value (type depends on the wiphy's signal_type)
- * @free_priv: function pointer to free private data
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
struct ieee80211_channel *channel;
+ enum nl80211_bss_scan_width scan_width;
+
+ const struct cfg80211_bss_ies __rcu *ies;
+ const struct cfg80211_bss_ies __rcu *beacon_ies;
+ const struct cfg80211_bss_ies __rcu *proberesp_ies;
+
+ struct cfg80211_bss *hidden_beacon_bss;
+
+ s32 signal;
- u8 bssid[ETH_ALEN];
- u64 tsf;
u16 beacon_interval;
u16 capability;
- u8 *information_elements;
- size_t len_information_elements;
- u8 *beacon_ies;
- size_t len_beacon_ies;
- u8 *proberesp_ies;
- size_t len_proberesp_ies;
- s32 signal;
+ u8 bssid[ETH_ALEN];
- void (*free_priv)(struct cfg80211_bss *bss);
- u8 priv[0] __attribute__((__aligned__(sizeof(void *))));
+ u8 priv[0] __aligned(sizeof(void *));
};
/**
* ieee80211_bss_get_ie - find IE with given ID
* @bss: the bss to search
* @ie: the IE ID
- * Returns %NULL if not found.
+ *
+ * Note that the return value is an RCU-protected pointer, so
+ * rcu_read_lock() must be held when calling this function.
+ * Return: %NULL if not found.
*/
const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
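Since the IEs are RCU-managed, a lookup follows the usual RCU pattern (a
sketch; drv_note_ssid() is a hypothetical consumer):

	const u8 *ssid_ie;

	rcu_read_lock();
	ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
	if (ssid_ie)
		drv_note_ssid(ssid_ie + 2, ssid_ie[1]);	/* SSID octets, length */
	rcu_read_unlock();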
/**
- * struct cfg80211_crypto_settings - Crypto settings
- * @wpa_versions: indicates which, if any, WPA versions are enabled
- * (from enum nl80211_wpa_versions)
- * @cipher_group: group key cipher suite (or 0 if unset)
- * @n_ciphers_pairwise: number of AP supported unicast ciphers
- * @ciphers_pairwise: unicast key cipher suites
- * @n_akm_suites: number of AKM suites
- * @akm_suites: AKM suites
- * @control_port: Whether user space controls IEEE 802.1X port, i.e.,
- * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
- * required to assume that the port is unauthorized until authorized by
- * user space. Otherwise, port is marked authorized by default.
- * @control_port_ethertype: the control port protocol that should be
- * allowed through even on unauthorized ports
- * @control_port_no_encrypt: TRUE to prevent encryption of control port
- * protocol frames.
- */
-struct cfg80211_crypto_settings {
- u32 wpa_versions;
- u32 cipher_group;
- int n_ciphers_pairwise;
- u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
- int n_akm_suites;
- u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
- bool control_port;
- __be16 control_port_ethertype;
- bool control_port_no_encrypt;
-};
-
-/**
* struct cfg80211_auth_request - Authentication request data
*
* This structure provides information needed to complete IEEE 802.11
* authentication.
*
- * @bss: The BSS to authenticate with.
+ * @bss: The BSS to authenticate with, the callee must obtain a reference
+ * to it if it needs to keep it.
* @auth_type: Authentication type (algorithm)
* @ie: Extra IEs to add to Authentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
- * @local_state_change: This is a request for a local state only, i.e., no
- * Authentication frame is to be transmitted and authentication state is
- * to be changed without having to wait for a response from the peer STA
- * (AP).
+ * @sae_data: Non-IE data to use with SAE or %NULL. This starts with
+ * Authentication transaction sequence number field.
+ * @sae_data_len: Length of sae_data buffer in octets
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
@@ -839,7 +1596,19 @@ struct cfg80211_auth_request {
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len, key_idx;
- bool local_state_change;
+ const u8 *sae_data;
+ size_t sae_data_len;
+};
+
+/**
+ * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
+ *
+ * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
+ * @ASSOC_REQ_DISABLE_VHT: Disable VHT
+ */
+enum cfg80211_assoc_req_flags {
+ ASSOC_REQ_DISABLE_HT = BIT(0),
+ ASSOC_REQ_DISABLE_VHT = BIT(1),
};
/**
@@ -847,12 +1616,21 @@ struct cfg80211_auth_request {
*
* This structure provides information needed to complete IEEE 802.11
* (re)association.
- * @bss: The BSS to associate with.
+ * @bss: The BSS to associate with. If the call is successful the driver is
+ * given a reference that it must give back to cfg80211_send_rx_assoc()
+ * or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
+ * association requests while already associating must be rejected.
* @ie: Extra IEs to add to (Re)Association Request frame or %NULL
* @ie_len: Length of ie buffer in octets
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
* @crypto: crypto settings
* @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @flags: See &enum cfg80211_assoc_req_flags
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @vht_capa: VHT capability override
+ * @vht_capa_mask: VHT capability mask indicating which fields to use
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -860,6 +1638,10 @@ struct cfg80211_assoc_request {
size_t ie_len;
struct cfg80211_crypto_settings crypto;
bool use_mfp;
+ u32 flags;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
+ struct ieee80211_vht_cap vht_capa, vht_capa_mask;
};
/**
@@ -868,15 +1650,15 @@ struct cfg80211_assoc_request {
* This structure provides information needed to complete IEEE 802.11
* deauthentication.
*
- * @bss: the BSS to deauthenticate from
+ * @bssid: the BSSID of the BSS to deauthenticate from
* @ie: Extra IEs to add to Deauthentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the deauthentication
- * @local_state_change: This is a request for a local state only, i.e., no
- * Deauthentication frame is to be transmitted.
+ * @local_state_change: if set, change local state only and
+ * do not transmit a Deauthentication frame
*/
struct cfg80211_deauth_request {
- struct cfg80211_bss *bss;
+ const u8 *bssid;
const u8 *ie;
size_t ie_len;
u16 reason_code;
@@ -914,7 +1696,7 @@ struct cfg80211_disassoc_request {
* @ssid_len: The length of the SSID, will always be non-zero.
* @bssid: Fixed BSSID requested, may be %NULL, if set do not
* search for IBSSs with a different BSSID.
- * @channel: The channel to use if no IBSS can be found to join.
+ * @chandef: defines the channel to use if no other IBSS to join can be found
* @channel_fixed: The channel should be fixed -- do not search for
* IBSSs to join on other channels.
* @ie: information element(s) to include in the beacon
@@ -922,20 +1704,34 @@ struct cfg80211_disassoc_request {
* @beacon_interval: beacon interval to use
* @privacy: this is a protected network, keys will be configured
* after joining
+ * @control_port: whether user space controls IEEE 802.1X port, i.e.,
+ * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
+ * required to assume that the port is unauthorized until authorized by
+ * user space. Otherwise, port is marked authorized by default.
+ * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
+ * changes the channel when a radar is detected. This is required
+ * to operate on DFS channels.
* @basic_rates: bitmap of basic rates to use when creating the IBSS
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
*/
struct cfg80211_ibss_params {
- u8 *ssid;
- u8 *bssid;
- struct ieee80211_channel *channel;
- u8 *ie;
+ const u8 *ssid;
+ const u8 *bssid;
+ struct cfg80211_chan_def chandef;
+ const u8 *ie;
u8 ssid_len, ie_len;
u16 beacon_interval;
u32 basic_rates;
bool channel_fixed;
bool privacy;
+ bool control_port;
+ bool userspace_handles_dfs;
int mcast_rate[IEEE80211_NUM_BANDS];
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
};
/**
@@ -946,31 +1742,55 @@ struct cfg80211_ibss_params {
*
* @channel: The channel to use or %NULL if not specified (auto-select based
* on scan results)
+ * @channel_hint: The channel of the recommended BSS for initial connection or
+ * %NULL if not specified
* @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
* results)
+ * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
+ * %NULL if not specified. Unlike the @bssid parameter, the driver is
+ * allowed to ignore this @bssid_hint if it has knowledge of a better BSS
+ * to use.
* @ssid: SSID
* @ssid_len: Length of ssid in octets
* @auth_type: Authentication type (algorithm)
* @ie: IEs for association request
* @ie_len: Length of assoc_ie in octets
* @privacy: indicates whether privacy-enabled APs should be used
+ * @mfp: indicate whether management frame protection is used
* @crypto: crypto settings
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
+ * @flags: See &enum cfg80211_assoc_req_flags
+ * @bg_scan_period: Background scan period in seconds
+ * or -1 to indicate that default value is to be used.
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @vht_capa: VHT Capability overrides
+ * @vht_capa_mask: The bits of vht_capa which are to be used.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
- u8 *bssid;
- u8 *ssid;
+ struct ieee80211_channel *channel_hint;
+ const u8 *bssid;
+ const u8 *bssid_hint;
+ const u8 *ssid;
size_t ssid_len;
enum nl80211_auth_type auth_type;
- u8 *ie;
+ const u8 *ie;
size_t ie_len;
bool privacy;
+ enum nl80211_mfp mfp;
struct cfg80211_crypto_settings crypto;
const u8 *key;
u8 key_len, key_idx;
+ u32 flags;
+ int bg_scan_period;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
+ struct ieee80211_vht_cap vht_capa;
+ struct ieee80211_vht_cap vht_capa_mask;
};
/**
@@ -995,8 +1815,9 @@ enum wiphy_params_flags {
struct cfg80211_bitrate_mask {
struct {
u32 legacy;
- /* TODO: add support for masking MCS rates; e.g.: */
- /* u8 mcs[IEEE80211_HT_MCS_MASK_LEN]; */
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
} control[IEEE80211_NUM_BANDS];
};
/**
@@ -1009,8 +1830,243 @@ struct cfg80211_bitrate_mask {
* @pmkid: The PMK material itself.
*/
struct cfg80211_pmksa {
- u8 *bssid;
- u8 *pmkid;
+ const u8 *bssid;
+ const u8 *pmkid;
+};
+
+/**
+ * struct cfg80211_pkt_pattern - packet pattern
+ * @mask: bitmask where to match pattern and where to ignore bytes,
+ * one bit per byte, in same format as nl80211
+ * @pattern: bytes to match where bitmask is 1
+ * @pattern_len: length of pattern (in bytes)
+ * @pkt_offset: packet offset (in bytes)
+ *
+ * Internal note: @mask and @pattern are allocated in one chunk of
+ * memory, free @mask only!
+ */
+struct cfg80211_pkt_pattern {
+ const u8 *mask, *pattern;
+ int pattern_len;
+ int pkt_offset;
+};
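The mask format means bit i of @mask selects whether byte i of @pattern must
match. A minimal matcher sketch (drv_pattern_match() is hypothetical):

	static bool drv_pattern_match(const struct cfg80211_pkt_pattern *pat,
				      const u8 *data, int len)
	{
		int i;

		if (len < pat->pkt_offset + pat->pattern_len)
			return false;

		for (i = 0; i < pat->pattern_len; i++) {
			if (!(pat->mask[i / 8] & BIT(i % 8)))
				continue;	/* byte i is "don't care" */
			if (data[pat->pkt_offset + i] != pat->pattern[i])
				return false;
		}
		return true;
	}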
+
+/**
+ * struct cfg80211_wowlan_tcp - TCP connection parameters
+ *
+ * @sock: (internal) socket for source port allocation
+ * @src: source IP address
+ * @dst: destination IP address
+ * @dst_mac: destination MAC address
+ * @src_port: source port
+ * @dst_port: destination port
+ * @payload_len: data payload length
+ * @payload: data payload buffer
+ * @payload_seq: payload sequence stamping configuration
+ * @data_interval: interval at which to send data packets
+ * @wake_len: wakeup payload match length
+ * @wake_data: wakeup payload match data
+ * @wake_mask: wakeup payload match mask
+ * @tokens_size: length of the tokens buffer
+ * @payload_tok: payload token usage configuration
+ */
+struct cfg80211_wowlan_tcp {
+ struct socket *sock;
+ __be32 src, dst;
+ u16 src_port, dst_port;
+ u8 dst_mac[ETH_ALEN];
+ int payload_len;
+ const u8 *payload;
+ struct nl80211_wowlan_tcp_data_seq payload_seq;
+ u32 data_interval;
+ u32 wake_len;
+ const u8 *wake_data, *wake_mask;
+ u32 tokens_size;
+ /* must be last, variable member */
+ struct nl80211_wowlan_tcp_data_token payload_tok;
+};
+
+/**
+ * struct cfg80211_wowlan - Wake on Wireless-LAN support info
+ *
+ * This structure defines the enabled WoWLAN triggers for the device.
+ * @any: wake up on any activity -- special trigger if device continues
+ * operating as normal during suspend
+ * @disconnect: wake up if getting disconnected
+ * @magic_pkt: wake up on receiving magic packet
+ * @patterns: wake up on receiving packet matching a pattern
+ * @n_patterns: number of patterns
+ * @gtk_rekey_failure: wake up on GTK rekey failure
+ * @eap_identity_req: wake up on EAP identity request packet
+ * @four_way_handshake: wake up on 4-way handshake
+ * @rfkill_release: wake up when rfkill is released
+ * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h.
+ * NULL if not configured.
+ */
+struct cfg80211_wowlan {
+ bool any, disconnect, magic_pkt, gtk_rekey_failure,
+ eap_identity_req, four_way_handshake,
+ rfkill_release;
+ struct cfg80211_pkt_pattern *patterns;
+ struct cfg80211_wowlan_tcp *tcp;
+ int n_patterns;
+};
+
+/**
+ * struct cfg80211_coalesce_rules - Coalesce rule parameters
+ *
+ * This structure defines coalesce rule for the device.
+ * @delay: maximum coalescing delay in msecs.
+ * @condition: condition for packet coalescence.
+ * see &enum nl80211_coalesce_condition.
+ * @patterns: array of packet patterns
+ * @n_patterns: number of patterns
+ */
+struct cfg80211_coalesce_rules {
+ int delay;
+ enum nl80211_coalesce_condition condition;
+ struct cfg80211_pkt_pattern *patterns;
+ int n_patterns;
+};
+
+/**
+ * struct cfg80211_coalesce - Packet coalescing settings
+ *
+ * This structure defines coalescing settings.
+ * @rules: array of coalesce rules
+ * @n_rules: number of rules
+ */
+struct cfg80211_coalesce {
+ struct cfg80211_coalesce_rules *rules;
+ int n_rules;
+};
+
+/**
+ * struct cfg80211_wowlan_wakeup - wakeup report
+ * @disconnect: woke up by getting disconnected
+ * @magic_pkt: woke up by receiving magic packet
+ * @gtk_rekey_failure: woke up by GTK rekey failure
+ * @eap_identity_req: woke up by EAP identity request packet
+ * @four_way_handshake: woke up by 4-way handshake
+ * @rfkill_release: woke up by rfkill being released
+ * @pattern_idx: pattern that caused wakeup, -1 if not due to pattern
+ * @packet_present_len: copied wakeup packet data
+ * @packet_len: original wakeup packet length
+ * @packet: The packet causing the wakeup, if any.
+ * @packet_80211: For pattern match, magic packet and other data
+ *	frame triggers, an 802.3 frame should be reported; for
+ *	disconnect due to deauthentication, an 802.11 frame. This
+ *	indicates which it is.
+ * @tcp_match: TCP wakeup packet received
+ * @tcp_connlost: TCP connection lost or failed to establish
+ * @tcp_nomoretokens: TCP data ran out of tokens
+ */
+struct cfg80211_wowlan_wakeup {
+ bool disconnect, magic_pkt, gtk_rekey_failure,
+ eap_identity_req, four_way_handshake,
+ rfkill_release, packet_80211,
+ tcp_match, tcp_connlost, tcp_nomoretokens;
+ s32 pattern_idx;
+ u32 packet_present_len, packet_len;
+ const void *packet;
+};
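A hedged sketch of reporting a magic-packet wakeup from a driver's resume
path, assuming the cfg80211_report_wowlan_wakeup() notifier declared elsewhere
in this header:

	static void drv_report_magic_wakeup(struct wireless_dev *wdev,
					    const void *pkt, u32 pkt_len)
	{
		struct cfg80211_wowlan_wakeup wakeup = {
			.magic_pkt = true,
			.pattern_idx = -1,	/* not a pattern wakeup */
			.packet = pkt,
			.packet_len = pkt_len,
			.packet_present_len = pkt_len,
			.packet_80211 = false,	/* payload is an 802.3 frame */
		};

		cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);
	}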
+
+/**
+ * struct cfg80211_gtk_rekey_data - rekey data
+ * @kek: key encryption key
+ * @kck: key confirmation key
+ * @replay_ctr: replay counter
+ */
+struct cfg80211_gtk_rekey_data {
+ u8 kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_LEN];
+ u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+};
+
+/**
+ * struct cfg80211_update_ft_ies_params - FT IE Information
+ *
+ * This structure provides information needed to update the fast transition IE
+ *
+ * @md: The Mobility Domain ID, 2 Octet value
+ * @ie: Fast Transition IEs
+ * @ie_len: Length of ft_ie in octets
+ */
+struct cfg80211_update_ft_ies_params {
+ u16 md;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
+ * struct cfg80211_mgmt_tx_params - mgmt tx parameters
+ *
+ * This structure provides information needed to transmit a mgmt frame
+ *
+ * @chan: channel to use
+ * @offchan: indicates whether off-channel operation is required
+ * @wait: duration for ROC
+ * @buf: buffer to transmit
+ * @len: buffer length
+ * @no_cck: don't use cck rates for this frame
+ * @dont_wait_for_ack: tells the low level not to wait for an ack
+ * @n_csa_offsets: length of csa_offsets array
+ * @csa_offsets: array of all the csa offsets in the frame
+ */
+struct cfg80211_mgmt_tx_params {
+ struct ieee80211_channel *chan;
+ bool offchan;
+ unsigned int wait;
+ const u8 *buf;
+ size_t len;
+ bool no_cck;
+ bool dont_wait_for_ack;
+ int n_csa_offsets;
+ const u16 *csa_offsets;
+};
+
+/**
+ * struct cfg80211_dscp_exception - DSCP exception
+ *
+ * @dscp: DSCP value that does not adhere to the user priority range definition
+ * @up: user priority value to which the corresponding DSCP value belongs
+ */
+struct cfg80211_dscp_exception {
+ u8 dscp;
+ u8 up;
+};
+
+/**
+ * struct cfg80211_dscp_range - DSCP range definition for user priority
+ *
+ * @low: lowest DSCP value of this user priority range, inclusive
+ * @high: highest DSCP value of this user priority range, inclusive
+ */
+struct cfg80211_dscp_range {
+ u8 low;
+ u8 high;
+};
+
+/* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */
+#define IEEE80211_QOS_MAP_MAX_EX 21
+#define IEEE80211_QOS_MAP_LEN_MIN 16
+#define IEEE80211_QOS_MAP_LEN_MAX \
+ (IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX)
+
+/**
+ * struct cfg80211_qos_map - QoS Map Information
+ *
+ * This struct defines the Interworking QoS map setting for DSCP values
+ *
+ * @num_des: number of DSCP exceptions (0..21)
+ * @dscp_exception: optionally up to maximum of 21 DSCP exceptions from
+ * the user priority DSCP range definition
+ * @up: DSCP range definition for a particular user priority
+ */
+struct cfg80211_qos_map {
+ u8 num_des;
+ struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX];
+ struct cfg80211_dscp_range up[8];
};
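A sketch of how such a map would be consulted when classifying a frame
(drv_dscp_to_up() is hypothetical; exceptions take precedence over the ranges):

	static u8 drv_dscp_to_up(const struct cfg80211_qos_map *map, u8 dscp)
	{
		int i;

		for (i = 0; i < map->num_des; i++)
			if (map->dscp_exception[i].dscp == dscp)
				return map->dscp_exception[i].up;

		for (i = 0; i < 8; i++)
			if (dscp >= map->up[i].low && dscp <= map->up[i].high)
				return i;

		return 0;	/* default to best effort */
	}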
/**
@@ -1026,14 +2082,21 @@ struct cfg80211_pmksa {
* wireless extensions but this is subject to reevaluation as soon as this
* code is used more widely and we have a first user without wext.
*
- * @suspend: wiphy device needs to be suspended
+ * @suspend: wiphy device needs to be suspended. The variable @wow will
+ * be %NULL or contain the enabled Wake-on-Wireless triggers that are
+ * configured for the device.
* @resume: wiphy device needs to be resumed
+ * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback
+ * to call device_set_wakeup_enable() to enable/disable wakeup from
+ * the device.
*
* @add_virtual_intf: create a new virtual interface with the given name,
* must set the struct wireless_dev's iftype. Beware: You must create
- * the new netdev in the wiphy's network namespace!
+ * the new netdev in the wiphy's network namespace! Returns the struct
+ * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
+ * also set the address member in the wdev.
*
- * @del_virtual_intf: remove the virtual interface determined by ifindex.
+ * @del_virtual_intf: remove the virtual interface
*
* @change_virtual_intf: change type/configuration of virtual interface,
* keep the struct wireless_dev's iftype updated.
@@ -1054,16 +2117,20 @@ struct cfg80211_pmksa {
*
* @set_default_mgmt_key: set the default management frame key on an interface
*
- * @add_beacon: Add a beacon with given parameters, @head, @interval
- * and @dtim_period will be valid, @tail is optional.
- * @set_beacon: Change the beacon parameters for an access point mode
- * interface. This should reject the call when no beacon has been
- * configured.
- * @del_beacon: Remove beacon configuration and stop sending the beacon.
+ * @set_rekey_data: give the data necessary for GTK rekeying to the driver
+ *
+ * @start_ap: Start acting in AP mode defined by the parameters.
+ * @change_beacon: Change the beacon parameters for an access point mode
+ * interface. This should reject the call when AP mode wasn't started.
+ * @stop_ap: Stop being an AP, including stopping beaconing.
*
* @add_station: Add a new station.
* @del_station: Remove a station; @mac may be NULL to remove all stations.
- * @change_station: Modify a given station.
+ * @change_station: Modify a given station. Note that flags changes are not
+ * fully validated in cfg80211; in particular the auth/assoc/authorized flags
+ * might come to the driver in invalid combinations -- make sure to check
+ * them, also against the existing state! Drivers must call
+ * cfg80211_check_station_change() to validate the information.
* @get_station: get station information for the station identified by @mac
* @dump_station: dump station callback -- resume dump at index @idx
*
@@ -1072,10 +2139,14 @@ struct cfg80211_pmksa {
* @change_mpath: change a given mesh path
* @get_mpath: get a mesh path for the given parameters
* @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @join_mesh: join the mesh network with the specified parameters
+ * (invoked with the wireless_dev mutex held)
+ * @leave_mesh: leave the current mesh network
+ * (invoked with the wireless_dev mutex held)
*
- * @get_mesh_params: Put the current mesh parameters into *params
+ * @get_mesh_config: Get the current mesh configuration
*
- * @set_mesh_params: Set mesh parameters.
+ * @update_mesh_config: Update mesh parameters on a running mesh.
* The mask is a bitfield which tells us which parameters to
* set, and which to leave alone.
*
@@ -1083,11 +2154,14 @@ struct cfg80211_pmksa {
*
* @set_txq_params: Set TX queue parameters
*
- * @set_channel: Set channel for a given wireless interface. Some devices
- * may support multi-channel operation (by channel hopping) so cfg80211
- * doesn't verify much. Note, however, that the passed netdev may be
- * %NULL as well if the user requested changing the channel for the
- * device itself, or for a monitor interface.
+ * @libertas_set_mesh_channel: Only for backward compatibility for libertas,
+ * as it doesn't implement join_mesh and needs to set the channel to
+ * join the mesh instead.
+ *
+ * @set_monitor_channel: Set the monitor mode channel for the device. If other
+ * interfaces are active this callback should reject the configuration.
+ * If no interfaces are active or the device is down, the channel should
+ * be stored for when a monitor interface becomes active.
*
* @scan: Request to do a scan. If returning zero, the scan request is given
* the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1095,27 +2169,42 @@ struct cfg80211_pmksa {
* the scan/scan_done bracket too.
*
* @auth: Request to authenticate with the specified peer
+ * (invoked with the wireless_dev mutex held)
* @assoc: Request to (re)associate with the specified peer
+ * (invoked with the wireless_dev mutex held)
* @deauth: Request to deauthenticate from the specified peer
+ * (invoked with the wireless_dev mutex held)
* @disassoc: Request to disassociate from the specified peer
+ * (invoked with the wireless_dev mutex held)
*
* @connect: Connect to the ESS with the specified parameters. When connected,
* call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
* If the connection fails for some reason, call cfg80211_connect_result()
* with the status from the AP.
+ * (invoked with the wireless_dev mutex held)
* @disconnect: Disconnect from the BSS/ESS.
+ * (invoked with the wireless_dev mutex held)
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
* cfg80211_ibss_joined(), also call that function when changing BSSID due
* to a merge.
+ * (invoked with the wireless_dev mutex held)
* @leave_ibss: Leave the IBSS.
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or
+ * MESH mode)
*
* @set_wiphy_params: Notify that wiphy parameters have changed;
* @changed bitfield (see &enum wiphy_params_flags) describes which values
* have changed. The actual parameter values are available in
* struct wiphy. If returning an error, no value should be changed.
*
- * @set_tx_power: set the transmit power according to the parameters
+ * @set_tx_power: set the transmit power according to the parameters,
+ * the power passed is in mBm, to get dBm use MBM_TO_DBM(). The
+ * wdev may be %NULL if power was set for the wiphy, and will
+ * always be %NULL unless the driver supports per-vif TX power
+ * (as advertised by the nl80211 feature flag.)
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
@@ -1134,9 +2223,17 @@ struct cfg80211_pmksa {
* @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
* This allows the operation to be terminated prior to timeout based on
* the duration value.
- * @mgmt_tx: Transmit a management frame
- *
- * @testmode_cmd: run a test mode command
+ * @mgmt_tx: Transmit a management frame.
+ * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management
+ * frame on another channel
+ *
+ * @testmode_cmd: run a test mode command; @wdev may be %NULL
+ * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
+ * used by the function, but 0 and 1 must not be touched. Additionally,
+ * returning error codes other than -ENOBUFS and -ENOENT will terminate the
+ * dump and return to userspace with an error, so be careful. If any data
+ * was passed in from userspace then the data/len arguments will be present
+ * and point to the data contained in %NL80211_ATTR_TESTDATA.
*
* @set_bitrate_mask: set the bitrate mask configuration
*
@@ -1148,19 +2245,96 @@ struct cfg80211_pmksa {
* @set_power_mgmt: Configure WLAN power management. A timeout value of -1
* allows the driver to adjust the dynamic ps timeout value.
* @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
+ * @set_cqm_txe_config: Configure connection quality monitor TX error
+ * thresholds.
+ * @sched_scan_start: Tell the driver to start a scheduled scan.
+ * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan. This
+ * call must stop the scheduled scan and be ready for starting a new one
+ * before it returns, i.e. @sched_scan_start may be called immediately
+ * after that again and should not fail in that case. The driver should
+ * not call cfg80211_sched_scan_stopped() for a requested stop (when this
+ * method returns 0).
*
* @mgmt_frame_register: Notify driver that a management frame type was
* registered. Note that this callback may not sleep, and cannot run
* concurrently with itself.
+ *
+ * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
+ * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
+ * reject TX/RX mask combinations they cannot support by returning -EINVAL
+ * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
+ *
+ * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
+ *
+ * @tdls_mgmt: Transmit a TDLS management frame.
+ * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
+ *
+ * @probe_client: probe an associated client, must return a cookie that it
+ * later passes to cfg80211_probe_status().
+ *
+ * @set_noack_map: Set the NoAck Map for the TIDs.
+ *
+ * @get_et_sset_count: Ethtool API to get string-set count.
+ * See @ethtool_ops.get_sset_count
+ *
+ * @get_et_stats: Ethtool API to get a set of u64 stats.
+ * See @ethtool_ops.get_ethtool_stats
+ *
+ * @get_et_strings: Ethtool API to get a set of strings to describe stats
+ * and perhaps other supported types of ethtool data-sets.
+ * See @ethtool_ops.get_strings
+ *
+ * @get_channel: Get the current operating channel for the virtual interface.
+ * For monitor interfaces, it should return %NULL unless there's a single
+ * current monitoring channel.
+ *
+ * @start_p2p_device: Start the given P2P device.
+ * @stop_p2p_device: Stop the given P2P device.
+ *
+ * @set_mac_acl: Sets MAC address control list in AP and P2P GO mode.
+ * Parameters include ACL policy, an array of MAC address of stations
+ * and the number of MAC addresses. If there is already a list in driver
+ * this new list replaces the existing one. Driver has to clear its ACL
+ * when the number of MAC address entries passed is 0. Drivers which
+ * advertise the support for MAC based ACL have to implement this callback.
+ *
+ * @start_radar_detection: Start radar detection in the driver.
+ *
+ * @update_ft_ies: Provide updated Fast BSS Transition information to the
+ * driver. If the SME is in the driver/firmware, this information can be
+ * used in building Authentication and Reassociation Request frames.
+ *
+ * @crit_proto_start: Indicates a critical protocol needs more link reliability
+ * for a given duration (milliseconds). The protocol is provided so the
+ * driver can take the most appropriate actions.
+ * @crit_proto_stop: Indicates critical protocol no longer needs increased link
+ * reliability. This operation can not fail.
+ * @set_coalesce: Set coalesce parameters.
+ *
+ * @channel_switch: initiate channel-switch procedure (with CSA)
+ *
+ * @set_qos_map: Set QoS mapping information to the driver
+ *
+ * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
+ * given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
+ * changes during the lifetime of the BSS.
*/
struct cfg80211_ops {
- int (*suspend)(struct wiphy *wiphy);
+ int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
int (*resume)(struct wiphy *wiphy);
-
- int (*add_virtual_intf)(struct wiphy *wiphy, char *name,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params);
- int (*del_virtual_intf)(struct wiphy *wiphy, struct net_device *dev);
+ void (*set_wakeup)(struct wiphy *wiphy, bool enabled);
+
+ struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params);
+ int (*del_virtual_intf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
int (*change_virtual_intf)(struct wiphy *wiphy,
struct net_device *dev,
enum nl80211_iftype type, u32 *flags,
@@ -1177,58 +2351,67 @@ struct cfg80211_ops {
u8 key_index, bool pairwise, const u8 *mac_addr);
int (*set_default_key)(struct wiphy *wiphy,
struct net_device *netdev,
- u8 key_index);
+ u8 key_index, bool unicast, bool multicast);
int (*set_default_mgmt_key)(struct wiphy *wiphy,
struct net_device *netdev,
u8 key_index);
- int (*add_beacon)(struct wiphy *wiphy, struct net_device *dev,
- struct beacon_parameters *info);
- int (*set_beacon)(struct wiphy *wiphy, struct net_device *dev,
- struct beacon_parameters *info);
- int (*del_beacon)(struct wiphy *wiphy, struct net_device *dev);
+ int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *settings);
+ int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *info);
+ int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev);
int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_parameters *params);
+ const u8 *mac,
+ struct station_parameters *params);
int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac);
+ const u8 *mac);
int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_parameters *params);
+ const u8 *mac,
+ struct station_parameters *params);
int (*get_station)(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo);
+ const u8 *mac, struct station_info *sinfo);
int (*dump_station)(struct wiphy *wiphy, struct net_device *dev,
- int idx, u8 *mac, struct station_info *sinfo);
+ int idx, u8 *mac, struct station_info *sinfo);
int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev,
- u8 *dst, u8 *next_hop);
+ const u8 *dst, const u8 *next_hop);
int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev,
- u8 *dst);
+ const u8 *dst);
int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev,
- u8 *dst, u8 *next_hop);
+ const u8 *dst, const u8 *next_hop);
int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev,
- u8 *dst, u8 *next_hop,
- struct mpath_info *pinfo);
+ u8 *dst, u8 *next_hop, struct mpath_info *pinfo);
int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
- int idx, u8 *dst, u8 *next_hop,
- struct mpath_info *pinfo);
- int (*get_mesh_params)(struct wiphy *wiphy,
+ int idx, u8 *dst, u8 *next_hop,
+ struct mpath_info *pinfo);
+ int (*get_mesh_config)(struct wiphy *wiphy,
struct net_device *dev,
struct mesh_config *conf);
- int (*set_mesh_params)(struct wiphy *wiphy,
- struct net_device *dev,
- const struct mesh_config *nconf, u32 mask);
+ int (*update_mesh_config)(struct wiphy *wiphy,
+ struct net_device *dev, u32 mask,
+ const struct mesh_config *nconf);
+ int (*join_mesh)(struct wiphy *wiphy, struct net_device *dev,
+ const struct mesh_config *conf,
+ const struct mesh_setup *setup);
+ int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev);
+
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
- int (*set_txq_params)(struct wiphy *wiphy,
+ int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_txq_params *params);
- int (*set_channel)(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type);
+ int (*libertas_set_mesh_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan);
+
+ int (*set_monitor_channel)(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef);
- int (*scan)(struct wiphy *wiphy, struct net_device *dev,
+ int (*scan)(struct wiphy *wiphy,
struct cfg80211_scan_request *request);
int (*auth)(struct wiphy *wiphy, struct net_device *dev,
@@ -1236,11 +2419,9 @@ struct cfg80211_ops {
int (*assoc)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_assoc_request *req);
int (*deauth)(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_deauth_request *req,
- void *cookie);
+ struct cfg80211_deauth_request *req);
int (*disassoc)(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_disassoc_request *req,
- void *cookie);
+ struct cfg80211_disassoc_request *req);
int (*connect)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme);
@@ -1251,11 +2432,15 @@ struct cfg80211_ops {
struct cfg80211_ibss_params *params);
int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
+ int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
+ int rate[IEEE80211_NUM_BANDS]);
+
int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
- int (*set_tx_power)(struct wiphy *wiphy,
+ int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm);
- int (*get_tx_power)(struct wiphy *wiphy, int *dbm);
+ int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm);
int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
const u8 *addr);
@@ -1263,7 +2448,11 @@ struct cfg80211_ops {
void (*rfkill_poll)(struct wiphy *wiphy);
#ifdef CONFIG_NL80211_TESTMODE
- int (*testmode_cmd)(struct wiphy *wiphy, void *data, int len);
+ int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
+ int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
#endif
int (*set_bitrate_mask)(struct wiphy *wiphy,
@@ -1281,20 +2470,20 @@ struct cfg80211_ops {
int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev);
int (*remain_on_channel)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie);
int (*cancel_remain_on_channel)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie);
- int (*mgmt_tx)(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid,
- const u8 *buf, size_t len, u64 *cookie);
+ int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie);
+ int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie);
int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout);
@@ -1303,12 +2492,87 @@ struct cfg80211_ops {
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst);
+ int (*set_cqm_txe_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u32 rate, u32 pkts, u32 intvl);
+
void (*mgmt_frame_register)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u16 frame_type, bool reg);
int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+
+ int (*set_ringparam)(struct wiphy *wiphy, u32 tx, u32 rx);
+ void (*get_ringparam)(struct wiphy *wiphy,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
+
+ int (*sched_scan_start)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request);
+ int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev);
+
+ int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data);
+
+ int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, u32 peer_capability,
+ const u8 *buf, size_t len);
+ int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper);
+
+ int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u64 *cookie);
+
+ int (*set_noack_map)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 noack_map);
+
+ int (*get_et_sset_count)(struct wiphy *wiphy,
+ struct net_device *dev, int sset);
+ void (*get_et_stats)(struct wiphy *wiphy, struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+ void (*get_et_strings)(struct wiphy *wiphy, struct net_device *dev,
+ u32 sset, u8 *data);
+
+ int (*get_channel)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_chan_def *chandef);
+
+ int (*start_p2p_device)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+ void (*stop_p2p_device)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+
+ int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_acl_data *params);
+
+ int (*start_radar_detection)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms);
+ int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_ft_ies_params *ftie);
+ int (*crit_proto_start)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_crit_proto_id protocol,
+ u16 duration);
+ void (*crit_proto_stop)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
+ int (*set_coalesce)(struct wiphy *wiphy,
+ struct cfg80211_coalesce *coalesce);
+
+ int (*channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params);
+
+ int (*set_qos_map)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_qos_map *qos_map);
+
+ int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_chan_def *chandef);
};
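To illustrate the updated signatures (suspend now taking a WoWLAN pointer, scan no longer taking a net_device), a minimal hypothetical sketch of how a driver might fill this table follows; the my_* handlers are made-up names and a real ops table implements many more hooks:

static int my_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
{
	/* program WoWLAN triggers from @wow (may be NULL), then power down */
	return 0;
}

static int my_resume(struct wiphy *wiphy)
{
	return 0;
}

static int my_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
	/* start the hardware scan, report completion via cfg80211_scan_done() */
	return 0;
}

static const struct cfg80211_ops my_cfg80211_ops = {
	.suspend = my_suspend,
	.resume	 = my_resume,
	.scan	 = my_scan,
};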
/*
@@ -1319,25 +2583,6 @@ struct cfg80211_ops {
/**
* enum wiphy_flags - wiphy capability flags
*
- * @WIPHY_FLAG_CUSTOM_REGULATORY: tells us the driver for this device
- * has its own custom regulatory domain and cannot identify the
- * ISO / IEC 3166 alpha2 it belongs to. When this is enabled
- * we will disregard the first regulatory hint (when the
- * initiator is %REGDOM_SET_BY_CORE).
- * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
- * ignore regulatory domain settings until it gets its own regulatory
- * domain via its regulatory_hint() unless the regulatory hint is
- * from a country IE. After its gets its own regulatory domain it will
- * only allow further regulatory domain settings to further enhance
- * compliance. For example if channel 13 and 14 are disabled by this
- * regulatory domain no user regulatory domain can enable these channels
- * at a later time. This can be used for devices which do not have
- * calibration information guaranteed for frequencies or settings
- * outside of its regulatory domain.
- * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
- * that passive scan flags and beaconing flags may not be lifted by
- * cfg80211 due to regulatory beacon hints. For more information on beacon
- * hints read the documenation for regulatory_hint_found_beacon()
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -1351,21 +2596,136 @@ struct cfg80211_ops {
* control port protocol ethertype. The device also honours the
* control_port_no_encrypt flag.
* @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
+ * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
+ * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
+ * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
+ * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
+ * firmware.
+ * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
+ * @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation.
+ * @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z)
+ * link setup/discovery operations internally. Setup, discovery and
+ * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
+ * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
+ * used for asking the driver/firmware to perform a TDLS operation.
+ * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
+ * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
+ * when there are virtual interfaces in AP mode by calling
+ * cfg80211_report_obss_beacon().
+ * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
+ * responds to probe-requests in hardware.
+ * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
+ * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
+ * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
+ * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
+ * beaconing mode (AP, IBSS, Mesh, ...).
*/
enum wiphy_flags {
- WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
- WIPHY_FLAG_STRICT_REGULATORY = BIT(1),
- WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2),
+ /* use hole at 0 */
+ /* use hole at 1 */
+ /* use hole at 2 */
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
+ WIPHY_FLAG_MESH_AUTH = BIT(10),
+ WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11),
+ /* use hole at 12 */
+ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
+ WIPHY_FLAG_AP_UAPSD = BIT(14),
+ WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
+ WIPHY_FLAG_HAVE_AP_SME = BIT(17),
+ WIPHY_FLAG_REPORTS_OBSS = BIT(18),
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
+ WIPHY_FLAG_OFFCHAN_TX = BIT(20),
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
+ WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
};
-struct mac_address {
- u8 addr[ETH_ALEN];
+/**
+ * struct ieee80211_iface_limit - limit on certain interface types
+ * @max: maximum number of interfaces of these types
+ * @types: interface types (bits)
+ */
+struct ieee80211_iface_limit {
+ u16 max;
+ u16 types;
+};
+
+/**
+ * struct ieee80211_iface_combination - possible interface combination
+ * @limits: limits for the given interface types
+ * @n_limits: number of limitations
+ * @num_different_channels: can use up to this many different channels
+ * @max_interfaces: maximum number of interfaces in total allowed in this
+ * group
+ * @beacon_int_infra_match: In this combination, the beacon intervals
+ * between infrastructure and AP types must match. This is required
+ * only in special cases.
+ * @radar_detect_widths: bitmap of channel widths supported for radar detection
+ * @radar_detect_regions: bitmap of regions supported for radar detection
+ *
+ * With this structure the driver can describe which interface
+ * combinations it supports concurrently.
+ *
+ * Examples:
+ *
+ * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
+ *
+ * struct ieee80211_iface_limit limits1[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+ * };
+ * struct ieee80211_iface_combination combination1 = {
+ * .limits = limits1,
+ * .n_limits = ARRAY_SIZE(limits1),
+ * .max_interfaces = 2,
+ * .beacon_int_infra_match = true,
+ * };
+ *
+ *
+ * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
+ *
+ * struct ieee80211_iface_limit limits2[] = {
+ * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
+ * BIT(NL80211_IFTYPE_P2P_GO), },
+ * };
+ * struct ieee80211_iface_combination combination2 = {
+ * .limits = limits2,
+ * .n_limits = ARRAY_SIZE(limits2),
+ * .max_interfaces = 8,
+ * .num_different_channels = 1,
+ * };
+ *
+ *
+ * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
+ *
+ * This allows for an infrastructure connection and three P2P connections.
+ *
+ * struct ieee80211_iface_limit limits3[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ * BIT(NL80211_IFTYPE_P2P_CLIENT), },
+ * };
+ * struct ieee80211_iface_combination combination3 = {
+ * .limits = limits3,
+ * .n_limits = ARRAY_SIZE(limits3),
+ * .max_interfaces = 4,
+ * .num_different_channels = 2,
+ * };
+ */
+struct ieee80211_iface_combination {
+ const struct ieee80211_iface_limit *limits;
+ u32 num_different_channels;
+ u16 max_interfaces;
+ u8 n_limits;
+ bool beacon_int_infra_match;
+ u8 radar_detect_widths;
+ u8 radar_detect_regions;
};
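A driver advertises the combinations it supports by pointing the wiphy at such an array before registration; a sketch along the lines of example 1 above, with hypothetical my_* names:

static const struct ieee80211_iface_limit my_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
};

static const struct ieee80211_iface_combination my_combinations[] = {
	{
		.limits = my_limits,
		.n_limits = ARRAY_SIZE(my_limits),
		.max_interfaces = 2,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
};

	/* in the driver's probe path, before wiphy_register(): */
	wiphy->iface_combinations = my_combinations;
	wiphy->n_iface_combinations = ARRAY_SIZE(my_combinations);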
struct ieee80211_txrx_stypes {
@@ -1373,8 +2733,111 @@ struct ieee80211_txrx_stypes {
};
/**
+ * enum wiphy_wowlan_support_flags - WoWLAN support flags
+ * @WIPHY_WOWLAN_ANY: supports wakeup for the special "any"
+ * trigger that keeps the device operating as-is and
+ * wakes up the host on any activity, for example a
+ * received packet that passed filtering; note that the
+ * packet should be preserved in that case
+ * @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet
+ * (see nl80211.h)
+ * @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect
+ * @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep
+ * @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure
+ * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request
+ * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure
+ * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release
+ */
+enum wiphy_wowlan_support_flags {
+ WIPHY_WOWLAN_ANY = BIT(0),
+ WIPHY_WOWLAN_MAGIC_PKT = BIT(1),
+ WIPHY_WOWLAN_DISCONNECT = BIT(2),
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3),
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4),
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5),
+ WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6),
+ WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7),
+};
+
+struct wiphy_wowlan_tcp_support {
+ const struct nl80211_wowlan_tcp_data_token_feature *tok;
+ u32 data_payload_max;
+ u32 data_interval_max;
+ u32 wake_payload_max;
+ bool seq;
+};
+
+/**
+ * struct wiphy_wowlan_support - WoWLAN support data
+ * @flags: see &enum wiphy_wowlan_support_flags
+ * @n_patterns: number of supported wakeup patterns
+ * (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ * @tcp: TCP wakeup support information
+ */
+struct wiphy_wowlan_support {
+ u32 flags;
+ int n_patterns;
+ int pattern_max_len;
+ int pattern_min_len;
+ int max_pkt_offset;
+ const struct wiphy_wowlan_tcp_support *tcp;
+};
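Since the wiphy now carries only a const pointer to this structure (see the struct wiphy changes below), a driver can keep it static; a minimal sketch assuming magic-packet and disconnect wakeup support, with made-up pattern limits:

#ifdef CONFIG_PM
static const struct wiphy_wowlan_support my_wowlan = {
	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
	.n_patterns = 4,
	.pattern_min_len = 1,
	.pattern_max_len = 64,
};
#endif

	/* before wiphy_register(): */
#ifdef CONFIG_PM
	wiphy->wowlan = &my_wowlan;
#endif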
+
+/**
+ * struct wiphy_coalesce_support - coalesce support data
+ * @n_rules: maximum number of coalesce rules
+ * @max_delay: maximum supported coalescing delay in msecs
+ * @n_patterns: number of supported patterns in a rule
+ * (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ */
+struct wiphy_coalesce_support {
+ int n_rules;
+ int max_delay;
+ int n_patterns;
+ int pattern_max_len;
+ int pattern_min_len;
+ int max_pkt_offset;
+};
+
+/**
+ * enum wiphy_vendor_command_flags - validation flags for vendor commands
+ * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev
+ * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev
+ * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running
+ * (must be combined with %_WDEV or %_NETDEV)
+ */
+enum wiphy_vendor_command_flags {
+ WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0),
+ WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1),
+ WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2),
+};
+
+/**
+ * struct wiphy_vendor_command - vendor command definition
+ * @info: vendor command identifying information, as used in nl80211
+ * @flags: flags, see &enum wiphy_vendor_command_flags
+ * @doit: callback for the operation, note that wdev is %NULL if the
+ * flags didn't ask for a wdev and non-%NULL otherwise; the data
+ * pointer may be %NULL if userspace provided no data at all
+ */
+struct wiphy_vendor_command {
+ struct nl80211_vendor_cmd_info info;
+ u32 flags;
+ int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+};
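A hypothetical vendor command definition might look as follows; the OUI and subcommand number are made up and the handler is only a stub:

static int my_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			  const void *data, int data_len)
{
	/* @data may be NULL if userspace sent no payload */
	return 0;
}

static const struct wiphy_vendor_command my_vendor_cmds[] = {
	{
		.info = { .vendor_id = 0x001234, .subcmd = 1 },
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
			 WIPHY_VENDOR_CMD_NEED_RUNNING,
		.doit = my_vendor_doit,
	},
};

	/* before wiphy_register(): */
	wiphy->vendor_commands = my_vendor_cmds;
	wiphy->n_vendor_commands = ARRAY_SIZE(my_vendor_cmds);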
+
+/**
* struct wiphy - wireless hardware description
- * @reg_notifier: the driver's regulatory notification callback
+ * @reg_notifier: the driver's regulatory notification callback,
+ * note that if your driver uses wiphy_apply_custom_regulatory()
+ * the reg_notifier's request can be passed as NULL
* @regd: the driver's regulatory domain, if one was requested via
* the regulatory_hint() API. This can be used by the driver
* on the reg_notifier() if it chooses to ignore future
@@ -1391,7 +2854,7 @@ struct ieee80211_txrx_stypes {
* @perm_addr: permanent MAC address of this device
* @addr_mask: If the device supports multiple MAC addresses by masking,
* set this to a mask with variable bits set to 1, e.g. if the last
- * four bits are variable then set it to 00:...:00:0f. The actual
+ * four bits are variable then set it to 00-00-00-00-00-0f. The actual
* variable bits shall be determined by the interfaces added, with
* interfaces not matching the mask being rejected to be brought up.
* @n_addresses: number of addresses in @addresses.
@@ -1400,21 +2863,39 @@ struct ieee80211_txrx_stypes {
* by default for perm_addr. In this case, the mask should be set to
* all-zeroes. In this case it is assumed that the device can handle
* the same number of arbitrary MAC addresses.
+ * @registered: protects ->resume and ->suspend sysfs callbacks against
+ * unregister hardware
* @debugfsdir: debugfs directory used for this wiphy, will be renamed
* automatically on wiphy renames
* @dev: (virtual) struct device for this wiphy
* @wext: wireless extension handlers
* @priv: driver private data (sized according to wiphy_new() parameter)
* @interface_modes: bitmask of interfaces types valid for this wiphy,
* must be set by driver
+ * @iface_combinations: Valid interface combinations array, should not
+ * list single interface types.
+ * @n_iface_combinations: number of entries in @iface_combinations array.
+ * @software_iftypes: bitmask of software interface types, these are not
+ * subject to any restrictions since they are purely managed in SW.
* @flags: wiphy flags, see &enum wiphy_flags
+ * @regulatory_flags: wiphy regulatory flags, see
+ * &enum ieee80211_regulatory_flags
+ * @features: features advertised to nl80211, see &enum nl80211_feature_flags.
* @bss_priv_size: each BSS struct has private data allocated with it,
* this variable determines its size
* @max_scan_ssids: maximum number of SSIDs the device can scan for in
* any given scan
+ * @max_sched_scan_ssids: maximum number of SSIDs the device can scan
+ * for in any given scheduled scan
+ * @max_match_sets: maximum number of match sets the device can handle
+ * when performing a scheduled scan, 0 if filtering is not
+ * supported.
* @max_scan_ie_len: maximum length of user-controlled IEs device can
* add to probe request frames transmitted during a scan, must not
* include fixed IEs like supported rates
+ * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
+ * scans
* @coverage_class: current coverage class
* @fw_version: firmware version for ethtool reporting
* @hw_version: hardware version for ethtool reporting
@@ -1426,6 +2907,66 @@ struct ieee80211_txrx_stypes {
* @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or
* transmitted through nl80211, points to an array indexed by interface
* type
+ *
+ * @available_antennas_tx: bitmap of antennas which are available to be
+ * configured as TX antennas. Antenna configuration commands will be
+ * rejected unless this or @available_antennas_rx is set.
+ *
+ * @available_antennas_rx: bitmap of antennas which are available to be
+ * configured as RX antennas. Antenna configuration commands will be
+ * rejected unless this or @available_antennas_tx is set.
+ *
+ * @probe_resp_offload:
+ * Bitmap of supported protocols for probe response offloading.
+ * See &enum nl80211_probe_resp_offload_support_attr. Only valid
+ * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+ *
+ * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation
+ * may request, if implemented.
+ *
+ * @wowlan: WoWLAN support information
+ * @wowlan_config: current WoWLAN configuration; this should usually not be
+ * used since access to it is necessarily racy, use the parameter passed
+ * to the suspend() operation instead.
+ *
+ * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
+ * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden.
+ * If null, then none can be over-ridden.
+ * @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden.
+ * If null, then none can be over-ridden.
+ *
+ * @max_acl_mac_addrs: Maximum number of MAC addresses that the device
+ * supports for ACL.
+ *
+ * @extended_capabilities: extended capabilities supported by the driver,
+ * additional capabilities might be supported by userspace; these are
+ * the 802.11 extended capabilities ("Extended Capabilities element")
+ * and are in the same format as in the information element. See
+ * 802.11-2012 8.4.2.29 for the defined fields.
+ * @extended_capabilities_mask: mask of the valid values
+ * @extended_capabilities_len: length of the extended capabilities
+ * @coalesce: packet coalescing support information
+ *
+ * @vendor_commands: array of vendor commands supported by the hardware
+ * @n_vendor_commands: number of vendor commands
+ * @vendor_events: array of vendor events supported by the hardware
+ * @n_vendor_events: number of vendor events
+ *
+ * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
+ * (including P2P GO) or 0 to indicate no such limit is advertised. The
+ * driver is allowed to advertise a theoretical limit that it can reach in
+ * some cases, but may not always reach.
+ *
+ * @max_num_csa_counters: Number of supported csa_counters in beacons
+ * and probe responses. This value should be set if the driver
+ * wishes to limit the number of csa counters. Default (0) means
+ * infinite.
+ * @max_adj_channel_rssi_comp: max offset between the channel on which the
+ * frame was sent and the channel on which the frame was heard for which
+ * the reported rssi is still valid. If a driver is able to compensate the
+ * low rssi when a frame is heard on a different channel, then it should set
+ * this variable to the maximal offset for which it can compensate.
+ * This value should be set in MHz.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -1438,18 +2979,29 @@ struct wiphy {
const struct ieee80211_txrx_stypes *mgmt_stypes;
+ const struct ieee80211_iface_combination *iface_combinations;
+ int n_iface_combinations;
+ u16 software_iftypes;
+
u16 n_addresses;
/* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
u16 interface_modes;
- u32 flags;
+ u16 max_acl_mac_addrs;
+
+ u32 flags, regulatory_flags, features;
+
+ u32 ap_sme_capa;
enum cfg80211_signal_type signal_type;
int bss_priv_size;
u8 max_scan_ssids;
+ u8 max_sched_scan_ssids;
+ u8 max_match_sets;
u16 max_scan_ie_len;
+ u16 max_sched_scan_ie_len;
int n_cipher_suites;
const u32 *cipher_suites;
@@ -1460,11 +3012,31 @@ struct wiphy {
u32 rts_threshold;
u8 coverage_class;
- char fw_version[ETHTOOL_BUSINFO_LEN];
+ char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
+#ifdef CONFIG_PM
+ const struct wiphy_wowlan_support *wowlan;
+ struct cfg80211_wowlan *wowlan_config;
+#endif
+
+ u16 max_remain_on_channel_duration;
+
u8 max_num_pmkids;
+ u32 available_antennas_tx;
+ u32 available_antennas_rx;
+
+ /*
+ * Bitmap of supported protocols for probe response offloading
+ * see &enum nl80211_probe_resp_offload_support_attr. Only valid
+ * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+ */
+ u32 probe_resp_offload;
+
+ const u8 *extended_capabilities, *extended_capabilities_mask;
+ u8 extended_capabilities_len;
+
/* If multiple wiphys are registered and you're handed e.g.
* a regular netdev with assigned ieee80211_ptr, you won't
* know whether it points to a wiphy your driver has registered
@@ -1475,20 +3047,26 @@ struct wiphy {
struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
/* Lets us get back the wiphy on the callback */
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request);
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request);
/* fields below are read-only, assigned by cfg80211 */
- const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain __rcu *regd;
/* the item in /sys/class/ieee80211/ points to this,
* you need use set_wiphy_dev() (see below) */
struct device dev;
+ /* protects ->resume, ->suspend sysfs callbacks against unregister hw */
+ bool registered;
+
/* dir in debugfs: ieee80211/<wiphyname> */
struct dentry *debugfsdir;
+ const struct ieee80211_ht_cap *ht_capa_mod_mask;
+ const struct ieee80211_vht_cap *vht_capa_mod_mask;
+
#ifdef CONFIG_NET_NS
/* the network namespace this phy lives in currently */
struct net *_net;
@@ -1498,7 +3076,18 @@ struct wiphy {
const struct iw_handler_def *wext;
#endif
- char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
+ const struct wiphy_coalesce_support *coalesce;
+
+ const struct wiphy_vendor_command *vendor_commands;
+ const struct nl80211_vendor_cmd_info *vendor_events;
+ int n_vendor_commands, n_vendor_events;
+
+ u16 max_ap_assoc_sta;
+
+ u8 max_num_csa_counters;
+ u8 max_adj_channel_rssi_comp;
+
+ char priv[0] __aligned(NETDEV_ALIGN);
};
static inline struct net *wiphy_net(struct wiphy *wiphy)
@@ -1515,6 +3104,7 @@ static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
* wiphy_priv - return priv from wiphy
*
* @wiphy: the wiphy whose priv pointer to return
+ * Return: The priv of @wiphy.
*/
static inline void *wiphy_priv(struct wiphy *wiphy)
{
@@ -1526,6 +3116,7 @@ static inline void *wiphy_priv(struct wiphy *wiphy)
* priv_to_wiphy - return the wiphy containing the priv
*
* @priv: a pointer previously returned by wiphy_priv
+ * Return: The wiphy of @priv.
*/
static inline struct wiphy *priv_to_wiphy(void *priv)
{
@@ -1548,6 +3139,7 @@ static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev)
* wiphy_dev - get wiphy dev pointer
*
* @wiphy: The wiphy whose device struct to look up
+ * Return: The dev of @wiphy.
*/
static inline struct device *wiphy_dev(struct wiphy *wiphy)
{
@@ -1558,6 +3150,7 @@ static inline struct device *wiphy_dev(struct wiphy *wiphy)
* wiphy_name - get wiphy name
*
* @wiphy: The wiphy whose name to return
+ * Return: The name of @wiphy.
*/
static inline const char *wiphy_name(const struct wiphy *wiphy)
{
@@ -1573,8 +3166,8 @@ static inline const char *wiphy_name(const struct wiphy *wiphy)
* Create a new wiphy and associate the given operations with it.
* @sizeof_priv bytes are allocated for private use.
*
- * The returned pointer must be assigned to each netdev's
- * ieee80211_ptr for proper operation.
+ * Return: A pointer to the new wiphy. This pointer must be
+ * assigned to each netdev's ieee80211_ptr for proper operation.
*/
struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
@@ -1583,9 +3176,9 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
*
* @wiphy: The wiphy to register.
*
- * Returns a non-negative wiphy index or a negative error code.
+ * Return: A non-negative wiphy index or a negative error code.
*/
-extern int wiphy_register(struct wiphy *wiphy);
+int wiphy_register(struct wiphy *wiphy);
/**
* wiphy_unregister - deregister a wiphy from cfg80211
@@ -1596,40 +3189,50 @@ extern int wiphy_register(struct wiphy *wiphy);
* pointer, but the call may sleep to wait for an outstanding
* request that is being handled.
*/
-extern void wiphy_unregister(struct wiphy *wiphy);
+void wiphy_unregister(struct wiphy *wiphy);
/**
* wiphy_free - free wiphy
*
* @wiphy: The wiphy to free
*/
-extern void wiphy_free(struct wiphy *wiphy);
+void wiphy_free(struct wiphy *wiphy);
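Taken together, the usual lifecycle is allocate, fill in, register, and on teardown unregister and free; a rough sketch assuming a hypothetical struct my_priv as the private area:

struct my_priv {
	/* driver state sized into the wiphy allocation */
	int dummy;
};

static struct wiphy *my_wiphy_setup(struct device *dev,
				    const struct cfg80211_ops *ops)
{
	struct wiphy *wiphy = wiphy_new(ops, sizeof(struct my_priv));

	if (!wiphy)
		return NULL;

	set_wiphy_dev(wiphy, dev);
	/* fill wiphy->bands, interface_modes, flags, ... here */

	if (wiphy_register(wiphy) < 0) {
		wiphy_free(wiphy);
		return NULL;
	}
	return wiphy;
}

/* teardown: wiphy_unregister(wiphy); then wiphy_free(wiphy); */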
/* internal structs */
struct cfg80211_conn;
struct cfg80211_internal_bss;
struct cfg80211_cached_keys;
-#define MAX_AUTH_BSSES 4
-
/**
- * struct wireless_dev - wireless per-netdev state
+ * struct wireless_dev - wireless device state
+ *
+ * For netdevs, this structure must be allocated by the driver
+ * that uses the ieee80211_ptr field in struct net_device (this
+ * is intentional so it can be allocated along with the netdev.)
+ * It need not be registered then as netdev registration will
+ * be intercepted by cfg80211 to see the new wireless device.
*
- * This structure must be allocated by the driver/stack
- * that uses the ieee80211_ptr field in struct net_device
- * (this is intentional so it can be allocated along with
- * the netdev.)
+ * For non-netdev uses, it must also be allocated by the driver
+ * in response to the cfg80211 callbacks that require it; since
+ * there's no netdev registration in that case, it may not be
+ * allocated outside of callback operations that return it.
*
* @wiphy: pointer to hardware description
* @iftype: interface type
* @list: (private) Used to collect the interfaces
- * @netdev: (private) Used to reference back to the netdev
+ * @netdev: (private) Used to reference back to the netdev, may be %NULL
+ * @identifier: (private) Identifier used in nl80211 to identify this
+ * wireless device if it has no netdev
* @current_bss: (private) Used by the internal configuration code
- * @channel: (private) Used by the internal configuration code to track
- * user-set AP, monitor and WDS channels for wireless extensions
+ * @chandef: (private) Used by the internal configuration code to track
+ * the user-set channel definition.
+ * @preset_chandef: (private) Used by the internal configuration code to
+ * track the channel to be used for AP later
* @bssid: (private) Used by the internal configuration code
* @ssid: (private) Used by the internal configuration code
* @ssid_len: (private) Used by the internal configuration code
+ * @mesh_id_len: (private) Used by the internal configuration code
+ * @mesh_id_up_len: (private) Used by the internal configuration code
* @wext: (private) Used by the internal wireless extensions compat code
* @use_4addr: indicates 4addr mode is used on this interface, must be
* set by driver (if supported) on add_interface BEFORE registering the
@@ -1637,8 +3240,26 @@ struct cfg80211_cached_keys;
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
* @mgmt_registrations_lock: lock for the list
- * @mtx: mutex used to lock data in this struct
- * @cleanup_work: work struct used for cleanup that can't be done directly
+ * @mtx: mutex used to lock data in this struct; may be used by drivers,
+ * and some API functions require it to be held
+ * @beacon_interval: beacon interval used on this device for transmitting
+ * beacons, 0 when not valid
+ * @address: The address for this device, valid only if @netdev is %NULL
+ * @p2p_started: true if this is a P2P Device that has been started
+ * @cac_started: true if DFS channel availability check has been started
+ * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
+ * @cac_time_ms: CAC time in ms
+ * @ps: powersave mode is enabled
+ * @ps_timeout: dynamic powersave timeout
+ * @ap_unexpected_nlportid: (private) netlink port ID of application
+ * registered for unexpected class 3 frames (AP mode)
+ * @conn: (private) cfg80211 software SME connection state machine data
+ * @connect_keys: (private) keys to set after connection is established
+ * @ibss_fixed: (private) IBSS is using fixed BSSID
+ * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
+ * @event_list: (private) list for internal event processing
+ * @event_lock: (private) lock for event list
+ * @owner_nlportid: (private) owner socket port ID
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -1648,44 +3269,53 @@ struct wireless_dev {
struct list_head list;
struct net_device *netdev;
+ u32 identifier;
+
struct list_head mgmt_registrations;
spinlock_t mgmt_registrations_lock;
struct mutex mtx;
- struct work_struct cleanup_work;
+ bool use_4addr, p2p_started;
- bool use_4addr;
+ u8 address[ETH_ALEN] __aligned(sizeof(u16));
/* currently used for IBSS and SME - might be rearranged later */
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
- enum {
- CFG80211_SME_IDLE,
- CFG80211_SME_CONNECTING,
- CFG80211_SME_CONNECTED,
- } sme_state;
+ u8 ssid_len, mesh_id_len, mesh_id_up_len;
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
struct list_head event_list;
spinlock_t event_lock;
- struct cfg80211_internal_bss *authtry_bsses[MAX_AUTH_BSSES];
- struct cfg80211_internal_bss *auth_bsses[MAX_AUTH_BSSES];
struct cfg80211_internal_bss *current_bss; /* associated / joined */
- struct ieee80211_channel *channel;
+ struct cfg80211_chan_def preset_chandef;
+ struct cfg80211_chan_def chandef;
+
+ bool ibss_fixed;
+ bool ibss_dfs_possible;
bool ps;
int ps_timeout;
+ int beacon_interval;
+
+ u32 ap_unexpected_nlportid;
+
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
+
+ u32 owner_nlportid;
+
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
struct cfg80211_ibss_params ibss;
struct cfg80211_connect_params connect;
struct cfg80211_cached_keys *keys;
- u8 *ie;
+ const u8 *ie;
size_t ie_len;
u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -1695,10 +3325,18 @@ struct wireless_dev {
#endif
};
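For the netdev case described above, drivers typically embed the wireless_dev in their per-interface structure and hook it up before registering the netdev; a hypothetical sketch:

struct my_vif {
	struct wireless_dev wdev;
	/* per-interface driver state */
};

static int my_register_netdev(struct wiphy *wiphy, struct net_device *ndev,
			      struct my_vif *vif)
{
	vif->wdev.wiphy = wiphy;
	vif->wdev.iftype = NL80211_IFTYPE_STATION;
	vif->wdev.netdev = ndev;
	ndev->ieee80211_ptr = &vif->wdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(wiphy));

	return register_netdev(ndev);
}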
+static inline u8 *wdev_address(struct wireless_dev *wdev)
+{
+ if (wdev->netdev)
+ return wdev->netdev->dev_addr;
+ return wdev->address;
+}
+
/**
* wdev_priv - return wiphy priv from wireless_dev
*
* @wdev: The wireless device whose wiphy's priv pointer to return
+ * Return: The wiphy priv of @wdev.
*/
static inline void *wdev_priv(struct wireless_dev *wdev)
{
@@ -1715,14 +3353,17 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
/**
* ieee80211_channel_to_frequency - convert channel number to frequency
* @chan: channel number
+ * @band: band, necessary due to channel number overlap
+ * Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
-extern int ieee80211_channel_to_frequency(int chan);
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
* @freq: center frequency
+ * Return: The corresponding channel, or 0 if the conversion failed.
*/
-extern int ieee80211_frequency_to_channel(int freq);
+int ieee80211_frequency_to_channel(int freq);
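Because channel numbers overlap between bands, the band argument is now required for the channel-to-frequency direction; a small usage sketch:

static void my_band_example(void)
{
	int freq = ieee80211_channel_to_frequency(6, IEEE80211_BAND_2GHZ);
	int chan = ieee80211_frequency_to_channel(5180);

	/* freq == 2437 (MHz), chan == 36 */
	pr_debug("ch 6 @ 2.4 GHz -> %d MHz, 5180 MHz -> ch %d\n", freq, chan);
}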
/*
* Name indirection necessary because the ieee80211 code also has
@@ -1731,12 +3372,13 @@ extern int ieee80211_frequency_to_channel(int freq);
* to include both header files you'll (rightfully!) get a symbol
* clash.
*/
-extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
- int freq);
+struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
+ int freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
* @wiphy: the struct wiphy to get the channel for
* @freq: the center frequency of the channel
+ * Return: The channel struct from @wiphy at @freq.
*/
static inline struct ieee80211_channel *
ieee80211_get_channel(struct wiphy *wiphy, int freq)
@@ -1751,15 +3393,26 @@ ieee80211_get_channel(struct wiphy *wiphy, int freq)
* @basic_rates: bitmap of basic rates
* @bitrate: the bitrate for which to find the basic rate
*
- * This function returns the basic rate corresponding to a given
- * bitrate, that is the next lower bitrate contained in the basic
- * rate map, which is, for this function, given as a bitmap of
- * indices of rates in the band's bitrate table.
+ * Return: The basic rate corresponding to a given bitrate, that
+ * is the next lower bitrate contained in the basic rate map,
+ * which is, for this function, given as a bitmap of indices of
+ * rates in the band's bitrate table.
*/
struct ieee80211_rate *
ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
u32 basic_rates, int bitrate);
+/**
+ * ieee80211_mandatory_rates - get mandatory rates for a given band
+ * @sband: the band to look for rates in
+ * @scan_width: width of the control channel
+ *
+ * This function returns a bitmap of the mandatory rates for the given
+ * band, bits are set according to the rate position in the bitrates array.
+ */
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width);
+
/*
* Radiotap parsing functions -- for controlled injection support
*
@@ -1832,13 +3485,14 @@ struct ieee80211_radiotap_iterator {
int _reset_on_ext;
};
-extern int ieee80211_radiotap_iterator_init(
- struct ieee80211_radiotap_iterator *iterator,
- struct ieee80211_radiotap_header *radiotap_header,
- int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns);
+int
+ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length,
+ const struct ieee80211_radiotap_vendor_namespaces *vns);
-extern int ieee80211_radiotap_iterator_next(
- struct ieee80211_radiotap_iterator *iterator);
+int
+ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator);
extern const unsigned char rfc1042_header[6];
@@ -1847,22 +3501,34 @@ extern const unsigned char bridge_tunnel_header[6];
/**
* ieee80211_get_hdrlen_from_skb - get header length from data
*
+ * @skb: the frame
+ *
* Given an skb with a raw 802.11 header at the data pointer this function
- * returns the 802.11 header length in bytes (not including encryption
- * headers). If the data in the sk_buff is too short to contain a valid 802.11
- * header the function returns 0.
+ * returns the 802.11 header length.
*
- * @skb: the frame
+ * Return: The 802.11 header length in bytes (not including encryption
+ * headers). Or 0 if the data in the sk_buff is too short to contain a valid
+ * 802.11 header.
*/
unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
/**
* ieee80211_hdrlen - get header length in bytes from frame control
* @fc: frame control field in little-endian format
+ * Return: The header length in bytes.
*/
unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
/**
+ * ieee80211_get_mesh_hdrlen - get mesh extension header length
+ * @meshhdr: the mesh extension header, only the flags field
+ * (first byte) will be accessed
+ * Return: The length of the extension header, which is always at
+ * least 6 bytes and at most 18 if address 5 and 6 are present.
+ */
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
+
+/**
* DOC: Data path helpers
*
* In addition to generic utilities, cfg80211 also offers
@@ -1875,6 +3541,7 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
* @skb: the 802.11 data frame
* @addr: the device MAC address
* @iftype: the virtual interface type
+ * Return: 0 on success. Non-zero on error.
*/
int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype);
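A typical receive-path use, sketched for a hypothetical station-mode driver: convert the 802.11 data frame in place and hand the resulting 802.3 frame to the stack.

static void my_rx_data(struct net_device *dev, struct sk_buff *skb)
{
	if (ieee80211_data_to_8023(skb, dev->dev_addr,
				   NL80211_IFTYPE_STATION)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}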
@@ -1886,9 +3553,11 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
* @iftype: the virtual interface type
* @bssid: the network bssid (used only for iftype STATION and ADHOC)
* @qos: build 802.11 QoS data frame
+ * Return: 0 on success, or a negative error code.
*/
int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype, u8 *bssid, bool qos);
+ enum nl80211_iftype iftype, const u8 *bssid,
+ bool qos);
/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
@@ -1903,16 +3572,21 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
+ * @has_80211_header: Set to true if the SKB has an IEEE 802.11 header.
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
- const unsigned int extra_headroom);
+ const unsigned int extra_headroom,
+ bool has_80211_header);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
* @skb: the data frame
+ * @qos_map: Interworking QoS mapping or %NULL if not in use
+ * Return: The 802.1p/1d tag.
*/
-unsigned int cfg80211_classify8021d(struct sk_buff *skb);
+unsigned int cfg80211_classify8021d(struct sk_buff *skb,
+ struct cfg80211_qos_map *qos_map);
/**
* cfg80211_find_ie - find information element in data
@@ -1921,16 +3595,36 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb);
* @ies: data consisting of IEs
* @len: length of data
*
- * This function will return %NULL if the element ID could
- * not be found or if the element is invalid (claims to be
- * longer than the given data), or a pointer to the first byte
- * of the requested element, that is the byte containing the
- * element ID. There are no checks on the element length
- * other than having to fit into the given data.
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data), or a pointer to the first byte of the requested
+ * element, that is the byte containing the element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
*/
const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
/**
+ * cfg80211_find_vendor_ie - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the vendor specific element ID could not be found or if the
+ * element is invalid (claims to be longer than the given data), or a pointer to
+ * the first byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than having to fit into
+ * the given data.
+ */
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+ const u8 *ies, int len);
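Illustrative use of both lookups on a buffer of IEs (for example a beacon body); the OUI/type pair shown is the well-known Microsoft WPA element, everything else is hypothetical:

static void my_parse_ies(const u8 *ies, int len)
{
	const u8 *ssid_ie, *wpa_ie;

	ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, len);
	if (ssid_ie)
		pr_debug("SSID element with %u bytes of data\n", ssid_ie[1]);

	wpa_ie = cfg80211_find_vendor_ie(0x0050f2, 1, ies, len);
	if (wpa_ie)
		pr_debug("WPA vendor element present\n");
}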
+
+/**
* DOC: Regulatory enforcement infrastructure
*
* TODO
@@ -1958,8 +3652,10 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
*
* Drivers should check the return value, its possible you can get
* an -ENOMEM.
+ *
+ * Return: 0 on success. -ENOMEM.
*/
-extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
+int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
@@ -1972,144 +3668,42 @@ extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
* custom regulatory domain will be trusted completely and as such previous
* default channel settings will be disregarded. If no rule is found for a
* channel on the regulatory domain the channel will be disabled.
+ * Drivers using this for a wiphy should also set the wiphy flag
+ * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy
+ * that called this helper.
*/
-extern void wiphy_apply_custom_regulatory(
- struct wiphy *wiphy,
- const struct ieee80211_regdomain *regd);
+void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
+ const struct ieee80211_regdomain *regd);
/**
* freq_reg_info - get regulatory information for the given frequency
* @wiphy: the wiphy for which we want to process this rule for
* @center_freq: Frequency in KHz for which we want regulatory information for
- * @desired_bw_khz: the desired max bandwidth you want to use per
- * channel. Note that this is still 20 MHz if you want to use HT40
- * as HT40 makes use of two channels for its 40 MHz width bandwidth.
- * If set to 0 we'll assume you want the standard 20 MHz.
- * @reg_rule: the regulatory rule which we have for this frequency
*
* Use this function to get the regulatory rule for a specific frequency on
* a given wireless device. If the device has a specific regulatory domain
* it wants to follow we respect that unless a country IE has been received
* and processed already.
*
- * Returns 0 if it was able to find a valid regulatory rule which does
- * apply to the given center_freq otherwise it returns non-zero. It will
- * also return -ERANGE if we determine the given center_freq does not even have
- * a regulatory rule for a frequency range in the center_freq's band. See
- * freq_in_rule_band() for our current definition of a band -- this is purely
- * subjective and right now its 802.11 specific.
+ * Return: A valid pointer, or, when an error occurs, for example if no rule
+ * can be found, the return value is encoded using ERR_PTR(). Use IS_ERR() to
+ * check and PTR_ERR() to obtain the numeric return value. The numeric return
+ * value will be -ERANGE if we determine the given center_freq does not even
+ * have a regulatory rule for a frequency range in the center_freq's band.
+ * See freq_in_rule_band() for our current definition of a band -- this is
+ * purely subjective and right now it's 802.11 specific.
*/
-extern int freq_reg_info(struct wiphy *wiphy,
- u32 center_freq,
- u32 desired_bw_khz,
- const struct ieee80211_reg_rule **reg_rule);
+const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
+ u32 center_freq);
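With the rule returned directly, callers now use the ERR_PTR() convention; a sketch of the check, assuming the frequency is given in MHz and converted with MHZ_TO_KHZ() since the argument is in kHz:

static bool my_have_reg_rule(struct wiphy *wiphy, u32 freq_mhz)
{
	const struct ieee80211_reg_rule *rule;

	rule = freq_reg_info(wiphy, MHZ_TO_KHZ(freq_mhz));
	if (IS_ERR(rule))
		return false;	/* e.g. -ERANGE: no rule for this band */

	/* rule->freq_range and rule->power_rule are valid here */
	return true;
}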
-/*
- * Temporary wext handlers & helper functions
- *
- * In the future cfg80211 will simply assign the entire wext handler
- * structure to netdevs it manages, but we're not there yet.
- */
-int cfg80211_wext_giwname(struct net_device *dev,
- struct iw_request_info *info,
- char *name, char *extra);
-int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
- u32 *mode, char *extra);
-int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
- u32 *mode, char *extra);
-int cfg80211_wext_siwscan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-int cfg80211_wext_giwscan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
-int cfg80211_wext_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-int cfg80211_wext_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-
-int cfg80211_wext_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra);
-int cfg80211_wext_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra);
-int cfg80211_wext_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid);
-int cfg80211_wext_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid);
-int cfg80211_wext_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra);
-int cfg80211_wext_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra);
-
-int cfg80211_wext_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rts, char *extra);
-int cfg80211_wext_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rts, char *extra);
-int cfg80211_wext_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frag, char *extra);
-int cfg80211_wext_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frag, char *extra);
-int cfg80211_wext_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *retry, char *extra);
-int cfg80211_wext_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *retry, char *extra);
-int cfg80211_wext_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *extra);
-int cfg80211_wext_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf);
-int cfg80211_wext_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf);
-int cfg80211_wext_siwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *keybuf);
-int cfg80211_wext_giwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *keybuf);
-struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev);
-
-int cfg80211_wext_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra);
-int cfg80211_wext_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra);
-
-int cfg80211_wext_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra);
-int cfg80211_wext_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra);
-
-int cfg80211_wext_siwpmksa(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra);
+/**
+ * reg_initiator_name - map regulatory request initiator enum to name
+ * @initiator: the regulatory request initiator
+ *
+ * You can use this to map the regulatory request initiator enum to a
+ * proper string representation.
+ */
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
/*
* callbacks for asynchronous cfg80211 methods, notification
@@ -2126,10 +3720,41 @@ int cfg80211_wext_siwpmksa(struct net_device *dev,
void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted);
/**
- * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame
+ * cfg80211_sched_scan_results - notify that new scan results are available
+ *
+ * @wiphy: the wiphy which got scheduled scan results
+ */
+void cfg80211_sched_scan_results(struct wiphy *wiphy);
+
+/**
+ * cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason. The driver
+ * is then called back via the sched_scan_stop operation when done.
+ */
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
+
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason. The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
+/**
+ * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
*
* @wiphy: the wiphy reporting the BSS
- * @channel: The channel the frame was received on
+ * @rx_channel: The channel the frame was received on
+ * @scan_width: width of the control channel
* @mgmt: the management frame (probe response or beacon)
* @len: length of the management frame
* @signal: the signal strength, type depends on the wiphy's signal_type
@@ -2137,20 +3762,36 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted);
*
* This informs cfg80211 that BSS information was found and
* the BSS should be updated/added.
+ *
+ * Return: A referenced struct, must be released with cfg80211_put_bss()!
+ * Or %NULL on error.
*/
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *rx_channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
+ struct ieee80211_channel *rx_channel,
struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ return cfg80211_inform_bss_width_frame(wiphy, rx_channel,
+ NL80211_BSS_CHAN_WIDTH_20,
+ mgmt, len, signal, gfp);
+}
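A hypothetical scan-results path, assuming the wiphy reports signal in mBm (CFG80211_SIGNAL_TYPE_MBM); the returned entry is referenced and must be released:

static void my_rx_scan_result(struct wiphy *wiphy,
			      struct ieee80211_channel *chan,
			      struct ieee80211_mgmt *mgmt, size_t len,
			      s32 signal_mbm)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, chan, mgmt, len,
					signal_mbm, GFP_ATOMIC);
	if (!bss)
		return;

	cfg80211_put_bss(wiphy, bss);
}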
/**
* cfg80211_inform_bss - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
- * @channel: The channel the frame was received on
+ * @rx_channel: The channel the frame was received on
+ * @scan_width: width of the control channel
* @bssid: the BSSID of the BSS
- * @timestamp: the TSF timestamp sent by the peer
+ * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
* @capability: the capability field sent by the peer
* @beacon_interval: the beacon interval announced by the peer
* @ie: additional IEs sent by the peer
@@ -2160,14 +3801,31 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
*
* This informs cfg80211 that BSS information was found and
* the BSS should be updated/added.
+ *
+ * Return: A referenced struct, must be released with cfg80211_put_bss()!
+ * Or %NULL on error.
*/
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *rx_channel,
+ enum nl80211_bss_scan_width scan_width,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid,
- u64 timestamp, u16 capability, u16 beacon_interval,
- const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp);
+ struct ieee80211_channel *rx_channel,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp)
+{
+ return cfg80211_inform_bss_width(wiphy, rx_channel,
+ NL80211_BSS_CHAN_WIDTH_20,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen, signal,
+ gfp);
+}
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -2183,11 +3841,23 @@ cfg80211_get_ibss(struct wiphy *wiphy,
WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
}
-struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *meshid, size_t meshidlen,
- const u8 *meshcfg);
-void cfg80211_put_bss(struct cfg80211_bss *bss);
+/**
+ * cfg80211_ref_bss - reference BSS struct
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @bss: the BSS struct to reference
+ *
+ * Increments the refcount of the given BSS struct.
+ */
+void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+
+/**
+ * cfg80211_put_bss - unref BSS struct
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @bss: the BSS struct
+ *
+ * Decrements the refcount of the given BSS struct.
+ */
+void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
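The lookup/unref pairing works the same way for cfg80211_get_bss(); a sketch that only checks whether a matching infrastructure BSS is currently known:

static bool my_bss_known(struct wiphy *wiphy, struct ieee80211_channel *chan,
			 const u8 *bssid, const u8 *ssid, size_t ssid_len)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			       WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
	if (!bss)
		return false;

	cfg80211_put_bss(wiphy, bss);
	return true;
}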
/**
* cfg80211_unlink_bss - unlink BSS from internal data structures
@@ -2201,113 +3871,108 @@ void cfg80211_put_bss(struct cfg80211_bss *bss);
*/
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+static inline enum nl80211_bss_scan_width
+cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return NL80211_BSS_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return NL80211_BSS_CHAN_WIDTH_10;
+ default:
+ return NL80211_BSS_CHAN_WIDTH_20;
+ }
+}
+
/**
- * cfg80211_send_rx_auth - notification of processed authentication
+ * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
* @dev: network device
* @buf: authentication frame (header + body)
* @len: length of the frame data
*
- * This function is called whenever an authentication has been processed in
- * station mode. The driver is required to call either this function or
- * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth()
- * call. This function may sleep.
- */
-void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
-
-/**
- * cfg80211_send_auth_timeout - notification of timed out authentication
- * @dev: network device
- * @addr: The MAC address of the device with which the authentication timed out
- *
- * This function may sleep.
+ * This function is called whenever an authentication, disassociation or
+ * deauthentication frame has been received and processed in station mode.
+ * After being asked to authenticate via cfg80211_ops::auth() the driver must
+ * call either this function or cfg80211_auth_timeout().
+ * After being asked to associate via cfg80211_ops::assoc() the driver must
+ * call either cfg80211_rx_assoc_resp() or cfg80211_assoc_timeout().
+ * While connected, the driver must call this for received and processed
+ * disassociation and deauthentication frames. If the frame couldn't be used
+ * because it was unprotected, the driver must call the function
+ * cfg80211_rx_unprot_mlme_mgmt() instead.
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
-void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr);
+void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
/**
- * __cfg80211_auth_canceled - notify cfg80211 that authentication was canceled
+ * cfg80211_auth_timeout - notification of timed out authentication
* @dev: network device
* @addr: The MAC address of the device with which the authentication timed out
*
- * When a pending authentication had no action yet, the driver may decide
- * to not send a deauth frame, but in that case must calls this function
- * to tell cfg80211 about this decision. It is only valid to call this
- * function within the deauth() callback.
+ * This function may sleep. The caller must hold the corresponding wdev's
+ * mutex.
*/
-void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr);
+void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
/**
- * cfg80211_send_rx_assoc - notification of processed association
+ * cfg80211_rx_assoc_resp - notification of processed association response
* @dev: network device
- * @buf: (re)association response frame (header + body)
+ * @bss: the BSS that association was requested with, ownership of the pointer
+ * moves to cfg80211 in this call
+ * @buf: (re)association response frame (header + body)
* @len: length of the frame data
*
- * This function is called whenever a (re)association response has been
- * processed in station mode. The driver is required to call either this
- * function or cfg80211_send_assoc_timeout() to indicate the result of
- * cfg80211_ops::assoc() call. This function may sleep.
+ * After being asked to associate via cfg80211_ops::assoc() the driver must
+ * call either this function or cfg80211_assoc_timeout().
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
-void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len);
+void cfg80211_rx_assoc_resp(struct net_device *dev,
+ struct cfg80211_bss *bss,
+ const u8 *buf, size_t len);
/**
- * cfg80211_send_assoc_timeout - notification of timed out association
+ * cfg80211_assoc_timeout - notification of timed out association
* @dev: network device
- * @addr: The MAC address of the device with which the association timed out
+ * @bss: The BSS entry with which association timed out.
*
- * This function may sleep.
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
-void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr);
+void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
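As a hedged sketch of the locking rule stated in the kernel-doc above (the helper name is made up, and the wdev mutex is assumed to be wdev->mtx), a driver reporting the association outcome might do:

/* Hypothetical helper: report association result while holding the wdev mutex. */
static void drv_assoc_done(struct net_device *dev, struct cfg80211_bss *bss,
			   const u8 *frame, size_t len, bool timed_out)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	mutex_lock(&wdev->mtx);
	if (timed_out)
		cfg80211_assoc_timeout(dev, bss);
	else
		cfg80211_rx_assoc_resp(dev, bss, frame, len);
	mutex_unlock(&wdev->mtx);
}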
/**
- * cfg80211_send_deauth - notification of processed deauthentication
+ * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
* @dev: network device
- * @buf: deauthentication frame (header + body)
+ * @buf: 802.11 frame (header + body)
* @len: length of the frame data
*
* This function is called whenever deauthentication has been processed in
* station mode. This includes both received deauthentication frames and
- * locally generated ones. This function may sleep.
+ * locally generated ones. This function may sleep. The caller must hold the
+ * corresponding wdev's mutex.
*/
-void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len);
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
/**
- * __cfg80211_send_deauth - notification of processed deauthentication
+ * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
* @dev: network device
* @buf: deauthentication frame (header + body)
* @len: length of the frame data
*
- * Like cfg80211_send_deauth(), but doesn't take the wdev lock.
+ * This function is called whenever a received deauthentication or disassociation
+ * frame has been dropped in station mode because of MFP being used but the
+ * frame was not protected. This function may sleep.
*/
-void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len);
-
-/**
- * cfg80211_send_disassoc - notification of processed disassociation
- * @dev: network device
- * @buf: disassociation response frame (header + body)
- * @len: length of the frame data
- *
- * This function is called whenever disassociation has been processed in
- * station mode. This includes both received disassociation frames and locally
- * generated ones. This function may sleep.
- */
-void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len);
-
-/**
- * __cfg80211_send_disassoc - notification of processed disassociation
- * @dev: network device
- * @buf: disassociation response frame (header + body)
- * @len: length of the frame data
- *
- * Like cfg80211_send_disassoc(), but doesn't take the wdev lock.
- */
-void __cfg80211_send_disassoc(struct net_device *dev, const u8 *buf,
- size_t len);
+void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev,
+ const u8 *buf, size_t len);
/**
* cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP)
* @dev: network device
* @addr: The source MAC address of the frame
* @key_type: The key type that the received frame used
- * @key_id: Key identifier (0..3)
+ * @key_id: Key identifier (0..3). Can be -1 if missing.
* @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
* @gfp: allocation flags
*
@@ -2324,6 +3989,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
*
* @dev: network device
* @bssid: the BSSID of the IBSS joined
+ * @channel: the channel of the IBSS joined
* @gfp: allocation flags
*
* This function notifies cfg80211 that the device joined an IBSS or
@@ -2333,7 +3999,24 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
* with the locally generated beacon -- this guarantees that there is
* always a scan result for this IBSS. cfg80211 will handle the rest.
*/
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel, gfp_t gfp);
+
+/**
+ * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer candidate
+ *
+ * @dev: network device
+ * @macaddr: the MAC address of the new candidate
+ * @ie: information elements advertised by the peer candidate
+ * @ie_len: length of the information elements buffer
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the mesh peer candidate has been
+ * detected, most likely via a beacon or, less likely, via a probe response.
+ * cfg80211 then sends a notification to userspace.
+ */
+void cfg80211_notify_new_peer_candidate(struct net_device *dev,
+ const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp);
/**
* DOC: RFkill integration
@@ -2368,6 +4051,121 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy);
*/
void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
+/**
+ * DOC: Vendor commands
+ *
+ * Occasionally, there are special protocol or firmware features that
+ * can't be implemented very openly. For this and similar cases, the
+ * vendor command functionality allows implementing the features with
+ * (typically closed-source) userspace and firmware, using nl80211 as
+ * the configuration mechanism.
+ *
+ * A driver supporting vendor commands must register them as an array
+ * in struct wiphy, with handlers for each one; each command has an
+ * OUI and sub command ID to identify it.
+ *
+ * Note that this feature should not be (ab)used to implement protocol
+ * features that could openly be shared across drivers. In particular,
+ * it must never be required to use vendor commands to implement any
+ * "normal" functionality that higher-level userspace like connection
+ * managers etc. need.
+ */
+
+struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
+ enum nl80211_commands cmd,
+ enum nl80211_attrs attr,
+ int approxlen);
+
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ enum nl80211_commands cmd,
+ enum nl80211_attrs attr,
+ int vendor_event_idx,
+ int approxlen, gfp_t gfp);
+
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
+
+/**
+ * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call cfg80211_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+ return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA, approxlen);
+}
+
+/**
+ * cfg80211_vendor_cmd_reply - send the reply skb
+ * @skb: The skb, must have been allocated with
+ * cfg80211_vendor_cmd_alloc_reply_skb()
+ *
+ * Since calling this function will usually be the last thing
+ * before returning from the vendor command doit() you should
+ * return the error code. Note that this function consumes the
+ * skb regardless of the return value.
+ *
+ * Return: An error code or 0 on success.
+ */
+int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
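As a sketch of the intended flow (the handler name and the vendor-specific attribute number are made up), a vendor command's doit() implementation might look like:

static int drv_vendor_get_value(struct wiphy *wiphy, struct wireless_dev *wdev,
				const void *data, int data_len)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u32));
	if (!skb)
		return -ENOMEM;

	/* Anything added here ends up inside %NL80211_ATTR_VENDOR_DATA. */
	if (nla_put_u32(skb, 1 /* made-up vendor-specific attribute */, 42)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	return cfg80211_vendor_cmd_reply(skb);	/* consumes the skb */
}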
+
+/**
+ * cfg80211_vendor_event_alloc - allocate vendor-specific event skb
+ * @wiphy: the wiphy
+ * @event_idx: index of the vendor event in the wiphy's vendor_events
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event on the
+ * vendor-specific multicast group.
+ *
+ * When done filling the skb, call cfg80211_vendor_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
+ int event_idx, gfp_t gfp)
+{
+ return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA,
+ event_idx, approxlen, gfp);
+}
+
+/**
+ * cfg80211_vendor_event - send the event
+ * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc()
+ * @gfp: allocation flags
+ *
+ * This function sends the given @skb, which must have been allocated
+ * by cfg80211_vendor_event_alloc(), as an event. It always consumes it.
+ */
+static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
+{
+ __cfg80211_send_event_skb(skb, gfp);
+}
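The event side follows the same pattern; a hedged sketch (function name and attribute number are illustrative) of emitting the first event registered in the wiphy's vendor_events array:

static void drv_report_vendor_event(struct wiphy *wiphy, u32 value)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_event_alloc(wiphy, sizeof(u32),
					  0 /* index into wiphy->vendor_events */,
					  GFP_KERNEL);
	if (!skb)
		return;

	if (nla_put_u32(skb, 1 /* made-up vendor-specific attribute */, value)) {
		kfree_skb(skb);
		return;
	}

	cfg80211_vendor_event(skb, GFP_KERNEL);	/* consumes the skb */
}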
+
#ifdef CONFIG_NL80211_TESTMODE
/**
* DOC: Test mode
@@ -2390,32 +4188,42 @@ void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
* the testmode command. Since it is intended for a reply, calling
* it outside of the @testmode_cmd operation is invalid.
*
- * The returned skb (or %NULL if any errors happen) is pre-filled
- * with the wiphy index and set up in a way that any data that is
- * put into the skb (with skb_put(), nla_put() or similar) will end
- * up being within the %NL80211_ATTR_TESTDATA attribute, so all that
- * needs to be done with the skb is adding data for the corresponding
- * userspace tool which can then read that data out of the testdata
- * attribute. You must not modify the skb in any other way.
+ * The returned skb is pre-filled with the wiphy index and set up in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NL80211_ATTR_TESTDATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the testdata attribute. You
+ * must not modify the skb in any other way.
*
* When done, call cfg80211_testmode_reply() with the skb and return
* its error code as the result of the @testmode_cmd operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
-struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
- int approxlen);
+static inline struct sk_buff *
+cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+ return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
+ NL80211_ATTR_TESTDATA, approxlen);
+}
/**
* cfg80211_testmode_reply - send the reply skb
* @skb: The skb, must have been allocated with
* cfg80211_testmode_alloc_reply_skb()
*
- * Returns an error code or 0 on success, since calling this
- * function will usually be the last thing before returning
- * from the @testmode_cmd you should return the error code.
- * Note that this function consumes the skb regardless of the
- * return value.
+ * Since calling this function will usually be the last thing
+ * before returning from the @testmode_cmd you should return
+ * the error code. Note that this function consumes the skb
+ * regardless of the return value.
+ *
+ * Return: An error code or 0 on success.
*/
-int cfg80211_testmode_reply(struct sk_buff *skb);
+static inline int cfg80211_testmode_reply(struct sk_buff *skb)
+{
+ return cfg80211_vendor_cmd_reply(skb);
+}
/**
* cfg80211_testmode_alloc_event_skb - allocate testmode event
@@ -2427,17 +4235,24 @@ int cfg80211_testmode_reply(struct sk_buff *skb);
* This function allocates and pre-fills an skb for an event on the
* testmode multicast group.
*
- * The returned skb (or %NULL if any errors happen) is set up in the
- * same way as with cfg80211_testmode_alloc_reply_skb() but prepared
- * for an event. As there, you should simply add data to it that will
- * then end up in the %NL80211_ATTR_TESTDATA attribute. Again, you must
- * not modify the skb in any other way.
+ * The returned skb is set up in the same way as with
+ * cfg80211_testmode_alloc_reply_skb() but prepared for an event. As
+ * there, you should simply add data to it that will then end up in the
+ * %NL80211_ATTR_TESTDATA attribute. Again, you must not modify the skb
+ * in any other way.
*
* When done filling the skb, call cfg80211_testmode_event() with the
* skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
-struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
- int approxlen, gfp_t gfp);
+static inline struct sk_buff *
+cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
+{
+ return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+ NL80211_ATTR_TESTDATA, -1,
+ approxlen, gfp);
+}
/**
* cfg80211_testmode_event - send the event
@@ -2449,11 +4264,16 @@ struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
* by cfg80211_testmode_alloc_event_skb(), as an event. It always
* consumes it.
*/
-void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp);
+static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+{
+ __cfg80211_send_event_skb(skb, gfp);
+}
#define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd),
+#define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd),
#else
#define CFG80211_TESTMODE_CMD(cmd)
+#define CFG80211_TESTMODE_DUMP(cmd)
#endif
/**
@@ -2482,6 +4302,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
* cfg80211_roamed - notify cfg80211 of roaming
*
* @dev: network device
+ * @channel: the channel of the new AP
* @bssid: the BSSID of the new AP
 * @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
@@ -2492,11 +4313,39 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
* It should be called by the underlying driver whenever it roamed
* from one AP to another while connected.
*/
-void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
+void cfg80211_roamed(struct net_device *dev,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
/**
+ * cfg80211_roamed_bss - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @bss: entry of bss to which STA got roamed
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @gfp: allocation flags
+ *
+ * This is just a wrapper to notify cfg80211 of a roaming event, with the
+ * driver passing the bss entry to avoid a race in bss entry timeout. It should be
+ * called by the underlying driver whenever it roamed from one AP to another
+ * while connected. Drivers which have roaming implemented in firmware
+ * may use this function to avoid a race in bss entry timeout where the bss
+ * entry of the new AP is seen in the driver, but gets timed out by the time
+ * it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * rdev->event_work. In case of any failures, the reference is released
+ * either in cfg80211_roamed_bss() or in __cfg80211_roamed(). Otherwise,
+ * it will be released while disconnecting from the current bss.
+ */
+void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+
+/**
* cfg80211_disconnected - notify cfg80211 that connection was dropped
*
* @dev: network device
@@ -2509,35 +4358,30 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
* and not try to connect to any AP any more.
*/
void cfg80211_disconnected(struct net_device *dev, u16 reason,
- u8 *ie, size_t ie_len, gfp_t gfp);
+ const u8 *ie, size_t ie_len, gfp_t gfp);
/**
* cfg80211_ready_on_channel - notification of remain_on_channel start
- * @dev: network device
+ * @wdev: wireless device
* @cookie: the request cookie
* @chan: The current channel (from remain_on_channel request)
- * @channel_type: Channel type
 * @duration: Duration in milliseconds that the driver intends to remain on the
* channel
* @gfp: allocation flags
*/
-void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp);
/**
* cfg80211_remain_on_channel_expired - remain_on_channel duration expired
- * @dev: network device
+ * @wdev: wireless device
* @cookie: the request cookie
* @chan: The current channel (from remain_on_channel request)
- * @channel_type: Channel type
* @gfp: allocation flags
*/
-void cfg80211_remain_on_channel_expired(struct net_device *dev,
- u64 cookie,
+void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
gfp_t gfp);
@@ -2553,27 +4397,57 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
+ * cfg80211_del_sta - notify userspace about deletion of a station
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @gfp: allocation flags
+ */
+void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
+
+/**
+ * cfg80211_conn_failed - connection request failed notification
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @reason: the reason for connection failure
+ * @gfp: allocation flags
+ *
+ * This function is called whenever a station tries to connect to an AP
+ * but the AP rejects the connection for some reason.
+ *
+ * The reason for the connection failure can be any of the values from
+ * &enum nl80211_connect_failed_reason.
+ */
+void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
+ enum nl80211_connect_failed_reason reason,
+ gfp_t gfp);
+
+/**
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
- * @dev: network device
+ * @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in MHz
+ * @sig_dbm: signal strength in mBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
* @gfp: context flags
*
- * Returns %true if a user space application has registered for this frame.
+ * This function is called whenever an Action frame is received for a station
+ * mode interface, but is not processed in kernel.
+ *
+ * Return: %true if a user space application has registered for this frame.
* For action frames, that makes it responsible for rejecting unrecognized
* action frames; %false otherwise, in which case for action frames the
* driver is responsible for rejecting the frame.
- *
- * This function is called whenever an Action frame is received for a station
- * mode interface, but is not processed in kernel.
*/
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
- size_t len, gfp_t gfp);
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
- * @dev: network device
+ * @wdev: wireless device receiving the frame
* @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
* @buf: Management frame (header + body)
* @len: length of the frame data
@@ -2584,7 +4458,7 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
* transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
* transmission attempt.
*/
-void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack, gfp_t gfp);
@@ -2602,6 +4476,33 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
gfp_t gfp);
/**
+ * cfg80211_radar_event - radar detection event
+ * @wiphy: the wiphy
+ * @chandef: chandef for the current channel
+ * @gfp: context flags
+ *
+ * This function is called when a radar is detected on the current channel.
+ */
+void cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef, gfp_t gfp);
+
+/**
+ * cfg80211_cac_event - Channel availability check (CAC) event
+ * @netdev: network device
+ * @chandef: chandef for the current channel
+ * @event: type of event
+ * @gfp: context flags
+ *
+ * This function is called when a Channel availability check (CAC) is finished
+ * or aborted. This must be called to notify the completion of a CAC process,
+ * also by full-MAC drivers.
+ */
+void cfg80211_cac_event(struct net_device *netdev,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_radar_event event, gfp_t gfp);
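To show how the two DFS notifications above fit together, here is a hedged sketch (the helper name is made up) of a driver finishing or aborting a CAC run:

/* Hypothetical DFS helper: report CAC completion, or radar found during CAC. */
static void example_dfs_notify(struct net_device *netdev,
			       struct cfg80211_chan_def *chandef,
			       bool radar_found)
{
	if (radar_found) {
		cfg80211_cac_event(netdev, chandef,
				   NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
		cfg80211_radar_event(netdev->ieee80211_ptr->wiphy, chandef,
				     GFP_KERNEL);
	} else {
		cfg80211_cac_event(netdev, chandef,
				   NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
	}
}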
+
+
+/**
* cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
* @dev: network device
* @peer: peer's MAC address
@@ -2613,6 +4514,335 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
void cfg80211_cqm_pktloss_notify(struct net_device *dev,
const u8 *peer, u32 num_packets, gfp_t gfp);
+/**
+ * cfg80211_cqm_txe_notify - TX error rate event
+ * @dev: network device
+ * @peer: peer's MAC address
+ * @num_packets: how many packets were lost
+ * @rate: % of packets which failed transmission
+ * @intvl: interval (in s) over which the TX failure threshold was breached.
+ * @gfp: context flags
+ *
+ * Notify userspace when the configured percentage of TX failures over the
+ * number of packets in a given interval is exceeded.
+ */
+void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
+
+/**
+ * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
+ * @dev: network device
+ * @bssid: BSSID of AP (to avoid races)
+ * @replay_ctr: new replay counter
+ * @gfp: allocation flags
+ */
+void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
+ const u8 *replay_ctr, gfp_t gfp);
+
+/**
+ * cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate
+ * @dev: network device
+ * @index: candidate index (the smaller the index, the higher the priority)
+ * @bssid: BSSID of AP
+ * @preauth: Whether AP advertises support for RSN pre-authentication
+ * @gfp: allocation flags
+ */
+void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp);
+
+/**
+ * cfg80211_rx_spurious_frame - inform userspace about a spurious frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * a spurious class 3 frame was received, to be able to deauth the
+ * sender.
+ * Return: %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * an associated station sent a 4addr frame but that wasn't expected.
+ * It is allowed and desirable to send this event only once for each
+ * station to avoid event flooding.
+ * Return: %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_probe_status - notify userspace about probe status
+ * @dev: the device the probe was sent on
+ * @addr: the address of the peer
+ * @cookie: the cookie filled in @probe_client previously
+ * @acked: indicates whether probe was acked or not
+ * @gfp: allocation flags
+ */
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+ u64 cookie, bool acked, gfp_t gfp);
+
+/**
+ * cfg80211_report_obss_beacon - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on
+ * @sig_dbm: signal strength in mBm, or 0 if unknown
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+ const u8 *frame, size_t len,
+ int freq, int sig_dbm);
+
+/**
+ * cfg80211_reg_can_beacon - check if beaconing is allowed
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @iftype: interface type
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.)
+ */
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype);
+
+/**
+ * cfg80211_ch_switch_notify - update wdev channel and notify userspace
+ * @dev: the device which switched channels
+ * @chandef: the new channel definition
+ *
+ * Caller must acquire wdev_lock, therefore must only be called from sleepable
+ * driver context!
+ */
+void cfg80211_ch_switch_notify(struct net_device *dev,
+ struct cfg80211_chan_def *chandef);
+
+/**
+ * ieee80211_operating_class_to_band - convert operating class to band
+ *
+ * @operating_class: the operating class to convert
+ * @band: band pointer to fill
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_band(u8 operating_class,
+ enum ieee80211_band *band);
+
+/**
+ * cfg80211_tdls_oper_request - request userspace to perform TDLS operation
+ * @dev: the device on which the operation is requested
+ * @peer: the MAC address of the peer device
+ * @oper: the requested TDLS operation (NL80211_TDLS_SETUP or
+ * NL80211_TDLS_TEARDOWN)
+ * @reason_code: the reason code for teardown request
+ * @gfp: allocation flags
+ *
+ * This function is used to request userspace to perform TDLS operation that
+ * requires knowledge of keys, i.e., link setup or teardown when the AP
+ * connection uses encryption. This is optional mechanism for the driver to use
+ * if it can automatically determine when a TDLS link could be useful (e.g.,
+ * based on traffic and signal strength for a peer).
+ */
+void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
+ enum nl80211_tdls_operation oper,
+ u16 reason_code, gfp_t gfp);
+
+/**
+ * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
+ * @rate: given rate_info to calculate bitrate from
+ *
+ * return 0 if MCS index >= 32
+ */
+u32 cfg80211_calculate_bitrate(struct rate_info *rate);
+
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * Call this function only for wdevs that have no netdev assigned,
+ * e.g. P2P Devices. It removes the device from the list so that
+ * it can no longer be used. It is necessary to call this function
+ * even when cfg80211 requests the removal of the interface by
+ * calling the del_virtual_intf() callback. The function must also
+ * be called when the driver wishes to unregister the wdev, e.g.
+ * when the device is unbound from the driver.
+ *
+ * Requires the RTNL to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
+/**
+ * struct cfg80211_ft_event - FT Information Elements
+ * @ies: FT IEs
+ * @ies_len: length of the FT IE in bytes
+ * @target_ap: target AP's MAC address
+ * @ric_ies: RIC IE
+ * @ric_ies_len: length of the RIC IE in bytes
+ */
+struct cfg80211_ft_event_params {
+ const u8 *ies;
+ size_t ies_len;
+ const u8 *target_ap;
+ const u8 *ric_ies;
+ size_t ric_ies_len;
+};
+
+/**
+ * cfg80211_ft_event - notify userspace about FT IE and RIC IE
+ * @netdev: network device
+ * @ft_event: IE information
+ */
+void cfg80211_ft_event(struct net_device *netdev,
+ struct cfg80211_ft_event_params *ft_event);
+
+/**
+ * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer
+ * @ies: the input IE buffer
+ * @len: the input length
+ * @attr: the attribute ID to find
+ * @buf: output buffer, can be %NULL if the data isn't needed, e.g.
+ * if the function is only called to get the needed buffer size
+ * @bufsize: size of the output buffer
+ *
+ * The function finds a given P2P attribute in the (vendor) IEs and
+ * copies its contents to the given buffer.
+ *
+ * Return: A negative error code (-%EILSEQ or -%ENOENT) if the data is
+ * malformed or the attribute can't be found (respectively), or the
+ * length of the found attribute (which can be zero).
+ */
+int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
+ enum ieee80211_p2p_attr_id attr,
+ u8 *buf, unsigned int bufsize);
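A short sketch of the two-pass pattern the kernel-doc above allows (passing %NULL first to learn the needed size); the wrapper name is illustrative:

static int example_get_p2p_attr(const u8 *ies, unsigned int ies_len,
				enum ieee80211_p2p_attr_id attr,
				u8 *out, unsigned int out_size)
{
	int len;

	/* First pass: only ask for the attribute's length. */
	len = cfg80211_get_p2p_attr(ies, ies_len, attr, NULL, 0);
	if (len < 0)
		return len;		/* -EILSEQ or -ENOENT */
	if (len > out_size)
		return -ENOSPC;

	/* Second pass: copy the attribute body into the caller's buffer. */
	return cfg80211_get_p2p_attr(ies, ies_len, attr, out, out_size);
}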
+
+/**
+ * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
+ * @wdev: the wireless device reporting the wakeup
+ * @wakeup: the wakeup report
+ * @gfp: allocation flags
+ *
+ * This function reports that the given device woke up. If it
+ * caused the wakeup, report the reason(s), otherwise you may
+ * pass %NULL as the @wakeup parameter to advertise that something
+ * else caused the wakeup.
+ */
+void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
+ struct cfg80211_wowlan_wakeup *wakeup,
+ gfp_t gfp);
+
+/**
+ * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver.
+ *
+ * @wdev: the wireless device for which critical protocol is stopped.
+ * @gfp: allocation flags
+ *
+ * This function can be called by the driver to indicate it has reverted
+ * operation back to normal. One reason could be that the duration given
+ * by .crit_proto_start() has expired.
+ */
+void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
+
+/**
+ * ieee80211_get_num_supported_channels - get number of channels device has
+ * @wiphy: the wiphy
+ *
+ * Return: the number of channels supported by the device.
+ */
+unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
+
+/**
+ * cfg80211_check_combinations - check interface combinations
+ *
+ * @wiphy: the wiphy
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the numbers of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ *
+ * This function can be called by the driver to check whether a
+ * combination of interfaces and their types is allowed according to
+ * the interface combinations.
+ */
+int cfg80211_check_combinations(struct wiphy *wiphy,
+ const int num_different_channels,
+ const u8 radar_detect,
+ const int iftype_num[NUM_NL80211_IFTYPES]);
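For illustration, a hedged sketch (helper name and scenario are made up) of asking whether one more AP interface would still fit the advertised combinations:

static int example_can_add_ap(struct wiphy *wiphy, int num_channels)
{
	int iftype_num[NUM_NL80211_IFTYPES] = {};

	iftype_num[NL80211_IFTYPE_STATION] = 1;	/* already running */
	iftype_num[NL80211_IFTYPE_AP] = 1;	/* the one we want to add */

	/* 0 means the combination fits the advertised limits. */
	return cfg80211_check_combinations(wiphy, num_channels,
					   0 /* no radar detection needed */,
					   iftype_num);
}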
+
+/**
+ * cfg80211_iter_combinations - iterate over matching combinations
+ *
+ * @wiphy: the wiphy
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the numbers of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ * @iter: function to call for each matching combination
+ * @data: pointer to pass to iter function
+ *
+ * This function can be called by the driver to check what possible
+ * combinations it fits in at a given moment, e.g. for channel switching
+ * purposes.
+ */
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+ const int num_different_channels,
+ const u8 radar_detect,
+ const int iftype_num[NUM_NL80211_IFTYPES],
+ void (*iter)(const struct ieee80211_iface_combination *c,
+ void *data),
+ void *data);
+
+/**
+ * cfg80211_stop_iface - trigger interface disconnection
+ *
+ * @wiphy: the wiphy
+ * @wdev: wireless device
+ * @gfp: context flags
+ *
+ * Trigger interface to be stopped as if AP was stopped, IBSS/mesh left, STA
+ * disconnected.
+ *
+ * Note: This doesn't need any locks and is asynchronous.
+ */
+void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
+ gfp_t gfp);
+
+/**
+ * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy
+ * @wiphy: the wiphy to shut down
+ *
+ * This function shuts down all interfaces belonging to this wiphy by
+ * calling dev_close() (and treating non-netdev interfaces as needed).
+ * It shouldn't really be used unless there are some fatal device errors
+ * that really can't be recovered in any other way.
+ *
+ * Callers must hold the RTNL and be able to deal with callbacks into
+ * the driver while the function is running.
+ */
+void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index ba55d8b8c87..87cb1903640 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -57,18 +57,33 @@ static __inline__ __wsum csum_and_copy_to_user
}
#endif
+#ifndef HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = (__force u32)csum;
res += (__force u32)addend;
return (__force __wsum)(res + (res < (__force u32)addend));
}
+#endif
static inline __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}
+static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+{
+ u16 res = (__force u16)csum;
+
+ res += (__force u16)addend;
+ return (__force __sum16)(res + (res < (__force u16)addend));
+}
+
+static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+{
+ return csum16_add(csum, ~addend);
+}
+
static inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
@@ -79,6 +94,12 @@ csum_block_add(__wsum csum, __wsum csum2, int offset)
}
static inline __wsum
+csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
+{
+ return csum_block_add(csum, csum2, offset);
+}
+
+static inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
u32 sum = (__force u32)csum2;
@@ -92,6 +113,11 @@ static inline __wsum csum_unfold(__sum16 n)
return (__force __wsum)n;
}
+static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+{
+ return csum_partial(buff, len, sum);
+}
+
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
@@ -101,14 +127,23 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
*sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
}
-static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
+/* Implements RFC 1624 (Incremental Internet Checksum)
+ * 3. Discussion states :
+ * HC' = ~(~HC + ~m + m')
+ * m : old value of a 16bit field
+ * m' : new value of a 16bit field
+ */
+static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
- csum_replace4(sum, (__force __be32)from, (__force __be32)to);
+ *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
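As a small worked use of the incremental update above (a sketch; it assumes struct iphdr from <linux/ip.h>, and the helper name is made up), rewriting a 16-bit header field and patching the checksum in place:

/* Sketch: change the IPv4 identification field without recomputing
 * the full header checksum. */
static inline void example_rewrite_ipid(struct iphdr *iph, __be16 new_id)
{
	csum_replace2(&iph->check, iph->id, new_id);
	iph->id = new_id;
}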
struct sk_buff;
-extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr);
+void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+ __be32 from, __be32 to, int pseudohdr);
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ const __be32 *from, const __be32 *to,
+ int pseudohdr);
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index abd443604c9..a6fd939f202 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -8,7 +8,7 @@
* have chosen to adopt the protocol and over the years it has become a
* de-facto standard for labeled networking.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -26,8 +26,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -41,7 +40,8 @@
#include <linux/skbuff.h>
#include <net/netlabel.h>
#include <net/request_sock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+#include <asm/unaligned.h>
/* known doi values */
#define CIPSO_V4_DOI_UNKNOWN 0x00000000
@@ -285,7 +285,35 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
static inline int cipso_v4_validate(const struct sk_buff *skb,
unsigned char **option)
{
- return -ENOSYS;
+ unsigned char *opt = *option;
+ unsigned char err_offset = 0;
+ u8 opt_len = opt[1];
+ u8 opt_iter;
+ u8 tag_len;
+
+ if (opt_len < 8) {
+ err_offset = 1;
+ goto out;
+ }
+
+ if (get_unaligned_be32(&opt[2]) == 0) {
+ err_offset = 2;
+ goto out;
+ }
+
+ for (opt_iter = 6; opt_iter < opt_len;) {
+ tag_len = opt[opt_iter + 1];
+ if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
+ err_offset = opt_iter + 1;
+ goto out;
+ }
+ opt_iter += tag_len;
+ }
+
+out:
+ *option = opt + err_offset;
+ return err_offset;
+
}
#endif /* CONFIG_NETLABEL */
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index a4dc5b027bd..c15d39456e1 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -16,55 +16,42 @@
#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
+#include <net/sock.h>
-#ifdef CONFIG_CGROUPS
-struct cgroup_cls_state
-{
+#ifdef CONFIG_CGROUP_NET_CLASSID
+struct cgroup_cls_state {
struct cgroup_subsys_state css;
u32 classid;
};
-#ifdef CONFIG_NET_CLS_CGROUP
+struct cgroup_cls_state *task_cls_state(struct task_struct *p);
+
static inline u32 task_cls_classid(struct task_struct *p)
{
- int classid;
+ u32 classid;
if (in_interrupt())
return 0;
rcu_read_lock();
- classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+ classid = container_of(task_css(p, net_cls_cgrp_id),
struct cgroup_cls_state, css)->classid;
rcu_read_unlock();
return classid;
}
-#else
-extern int net_cls_subsys_id;
-static inline u32 task_cls_classid(struct task_struct *p)
+static inline void sock_update_classid(struct sock *sk)
{
- int id;
- u32 classid = 0;
-
- if (in_interrupt())
- return 0;
-
- rcu_read_lock();
- id = rcu_dereference_index_check(net_cls_subsys_id,
- rcu_read_lock_held());
- if (id >= 0)
- classid = container_of(task_subsys_state(p, id),
- struct cgroup_cls_state, css)->classid;
- rcu_read_unlock();
+ u32 classid;
- return classid;
+ classid = task_cls_classid(current);
+ if (classid != sk->sk_classid)
+ sk->sk_classid = classid;
}
-#endif
-#else
-static inline u32 task_cls_classid(struct task_struct *p)
+#else /* !CONFIG_CGROUP_NET_CLASSID */
+static inline void sock_update_classid(struct sock *sk)
{
- return 0;
}
-#endif
+#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/codel.h b/include/net/codel.h
new file mode 100644
index 00000000000..fe0eab32ce7
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,355 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+
+/* CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+typedef s32 codel_tdiff_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_to_ns(ktime_get());
+
+ return ns >> CODEL_SHIFT;
+}
+
+/* Dealing with timer wrapping, according to RFC 1982, as described in Wikipedia:
+ * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
+ * codel_time_after(a,b) returns true if the time a is after time b.
+ */
+#define codel_time_after(a, b) \
+ (typecheck(codel_time_t, a) && \
+ typecheck(codel_time_t, b) && \
+ ((s32)((a) - (b)) > 0))
+#define codel_time_before(a, b) codel_time_after(b, a)
+
+#define codel_time_after_eq(a, b) \
+ (typecheck(codel_time_t, a) && \
+ typecheck(codel_time_t, b) && \
+ ((s32)((a) - (b)) >= 0))
+#define codel_time_before_eq(a, b) codel_time_after_eq(b, a)
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
+/**
+ * struct codel_params - contains codel parameters
+ * @target: target queue size (in time units)
+ * @interval: width of moving time window
+ * @ecn: is Explicit Congestion Notification enabled
+ */
+struct codel_params {
+ codel_time_t target;
+ codel_time_t interval;
+ bool ecn;
+};
+
+/**
+ * struct codel_vars - contains codel variables
+ * @count: how many drops we've done since the last time we
+ * entered dropping state
+ * @lastcount: count at entry to dropping state
+ * @dropping: set to true if in dropping state
+ * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
+ * @first_above_time: when we went (or will go) continuously above target
+ * for interval
+ * @drop_next: time to drop next packet, or when we dropped last
+ * @ldelay: sojourn time of last dequeued packet
+ */
+struct codel_vars {
+ u32 count;
+ u32 lastcount;
+ bool dropping;
+ u16 rec_inv_sqrt;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+};
+
+#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
+/* needed shift to get a Q0.32 number from rec_inv_sqrt */
+#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
+
+/**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket: largest packet we've seen so far
+ * @drop_count: temp count of dropped packets in dequeue()
+ * @ecn_mark: number of packets we ECN marked instead of dropping
+ */
+struct codel_stats {
+ u32 maxpacket;
+ u32 drop_count;
+ u32 ecn_mark;
+};
+
+static void codel_params_init(struct codel_params *params)
+{
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+ stats->maxpacket = 256;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+ u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+ u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+ val >>= 2; /* avoid overflow in following multiply */
+ val = (val * invsqrt) >> (32 - 2 + 1);
+
+ vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 rec_inv_sqrt)
+{
+ return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
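A quick sanity check of the fixed-point arithmetic above: with count == 4, rec_inv_sqrt holds 1/sqrt(4) = 0.5 in Q0.16, i.e. 0x8000. Shifting it left by REC_INV_SQRT_SHIFT gives 0x80000000 in Q0.32, and reciprocal_scale(interval, 0x80000000) computes (interval * 0x80000000) >> 32 == interval / 2, so the next drop lands interval/sqrt(count) after t, as the comment describes.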
+
+static bool codel_should_drop(const struct sk_buff *skb,
+ struct Qdisc *sch,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ vars->ldelay = now - codel_get_enqueue_time(skb);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+
+ if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+ stats->maxpacket = qdisc_pkt_len(skb);
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ sch->qstats.backlog <= stats->maxpacket) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ }
+ return ok_to_drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ struct Qdisc *sch);
+
+static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_dequeue_t dequeue_func)
+{
+ struct sk_buff *skb = dequeue_func(vars, sch);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, sch, vars, params, stats, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ vars->count++; /* don't care about a possible wrap
+ * since there is no more divide
+ */
+ codel_Newton_step(vars);
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ goto end;
+ }
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars, sch);
+ if (!codel_should_drop(skb, sch,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ }
+ }
+ }
+ } else if (drop) {
+ u32 delta;
+
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, sch);
+ drop = codel_should_drop(skb, sch, vars, params,
+ stats, now);
+ }
+ vars->dropping = true;
+ /* if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = delta;
+ /* we don't care if the rec_inv_sqrt approximation
+ * is not very precise:
+ * Next Newton steps will correct it quadratically.
+ */
+ codel_Newton_step(vars);
+ } else {
+ vars->count = 1;
+ vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->rec_inv_sqrt);
+ }
+end:
+ return skb;
+}
+#endif
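To show how the pieces of this header fit together, here is a hedged sketch of the dequeue glue a qdisc built on it could provide (names prefixed example_ are illustrative; sch_codel.c is the real in-tree user, and the enqueue path is assumed to have called codel_set_enqueue_time() on each skb):

struct example_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
};

/* Callback handed to codel_dequeue(): return the head of our own queue. */
static struct sk_buff *example_dequeue_head(struct codel_vars *vars,
					    struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);
}

static struct sk_buff *example_qdisc_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
			    example_dequeue_head);

	/* codel_dequeue() freed stats.drop_count packets; a real qdisc would
	 * report that via qdisc_tree_decrease_qlen() before clearing it. */
	q->stats.drop_count = 0;
	return skb;
}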
diff --git a/include/net/compat.h b/include/net/compat.h
index 28d5428ec6a..3b603b199c0 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -29,8 +29,8 @@ struct compat_cmsghdr {
compat_int_t cmsg_type;
};
-extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
+int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
+int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#else /* defined(CONFIG_COMPAT) */
/*
@@ -40,22 +40,30 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
-extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
- unsigned, unsigned,
- struct compat_timespec __user *);
-extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
-extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
-
-extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
-
-extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
- int (*)(struct sock *, int, int, char __user *, unsigned int));
-extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
- int __user *, int (*)(struct sock *, int, int, char __user *,
- int __user *));
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
+int verify_compat_iovec(struct msghdr *, struct iovec *,
+ struct sockaddr_storage *, int);
+asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
+ unsigned int);
+asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
+ unsigned int, unsigned int);
+asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *,
+ unsigned int);
+asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
+ unsigned int, unsigned int,
+ struct compat_timespec __user *);
+asmlinkage long compat_sys_getsockopt(int, int, int, char __user *,
+ int __user *);
+int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+
+int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
+ unsigned char *, int);
+
+int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
+ int (*)(struct sock *, int, int, char __user *,
+ unsigned int));
+int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *,
+ int (*)(struct sock *, int, int, char __user *,
+ int __user *));
#endif /* NET_COMPAT_H */
diff --git a/include/net/datalink.h b/include/net/datalink.h
index deb7ca75db4..93cb18f729b 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -15,4 +15,6 @@ struct datalink_proto {
struct list_head node;
};
+struct datalink_proto *make_EII_client(void);
+void destroy_EII_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
new file mode 100644
index 00000000000..aec07c8a660
--- /dev/null
+++ b/include/net/dcbevent.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: John Fastabend <john.r.fastabend@intel.com>
+ */
+
+#ifndef _DCB_EVENT_H
+#define _DCB_EVENT_H
+
+enum dcbevent_notif_type {
+ DCB_APP_EVENT = 1,
+};
+
+#ifdef CONFIG_DCB
+int register_dcbevent_notifier(struct notifier_block *nb);
+int unregister_dcbevent_notifier(struct notifier_block *nb);
+int call_dcbevent_notifiers(unsigned long val, void *v);
+#else
+static inline int
+register_dcbevent_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_dcbevent_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_dcbevent_notifiers(unsigned long val, void *v)
+{
+ return 0;
+}
+#endif /* CONFIG_DCB */
+
+#endif
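
Consumers hook these events through the standard notifier-chain API. A hedged fragment of the expected wiring is shown below; the callback name and body are invented for illustration, and only the register/unregister helpers declared above plus <linux/notifier.h> are assumed.

#include <linux/notifier.h>
#include <net/dcbevent.h>

static int example_dcb_app_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	if (event == DCB_APP_EVENT) {
		/* re-read the application priority table here */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_dcb_nb = {
	.notifier_call = example_dcb_app_event,
};

/* probe:  register_dcbevent_notifier(&example_dcb_nb);   */
/* remove: unregister_dcbevent_notifier(&example_dcb_nb); */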
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index b36ac7e0914..a975edf21b2 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Lucy Liu <lucy.liu@intel.com>
*/
@@ -20,11 +19,45 @@
#ifndef __NET_DCBNL_H__
#define __NET_DCBNL_H__
+#include <linux/dcbnl.h>
+
+struct dcb_app_type {
+ int ifindex;
+ struct dcb_app app;
+ struct list_head list;
+ u8 dcbx;
+};
+
+int dcb_setapp(struct net_device *, struct dcb_app *);
+u8 dcb_getapp(struct net_device *, struct dcb_app *);
+int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
+int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
+u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid);
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid);
+
/*
* Ops struct for the netlink callbacks. Used by DCB-enabled drivers through
* the netdevice struct.
*/
struct dcbnl_rtnl_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getets) (struct net_device *, struct ieee_ets *);
+ int (*ieee_setets) (struct net_device *, struct ieee_ets *);
+ int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
+ int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
+ int (*ieee_getapp) (struct net_device *, struct dcb_app *);
+ int (*ieee_setapp) (struct net_device *, struct dcb_app *);
+ int (*ieee_delapp) (struct net_device *, struct dcb_app *);
+ int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *);
+ int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *);
+
+ /* CEE std */
u8 (*getstate)(struct net_device *);
u8 (*setstate)(struct net_device *, u8);
void (*getpermhwaddr)(struct net_device *, u8 *);
@@ -40,8 +73,8 @@ struct dcbnl_rtnl_ops {
void (*getpfccfg)(struct net_device *, int, u8 *);
u8 (*setall)(struct net_device *);
u8 (*getcap)(struct net_device *, int, u8 *);
- u8 (*getnumtcs)(struct net_device *, int, u8 *);
- u8 (*setnumtcs)(struct net_device *, int, u8);
+ int (*getnumtcs)(struct net_device *, int, u8 *);
+ int (*setnumtcs)(struct net_device *, int, u8);
u8 (*getpfcstate)(struct net_device *);
void (*setpfcstate)(struct net_device *, u8);
void (*getbcncfg)(struct net_device *, int, u32 *);
@@ -50,6 +83,21 @@ struct dcbnl_rtnl_ops {
void (*setbcnrp)(struct net_device *, int, u8);
u8 (*setapp)(struct net_device *, u8, u16, u8);
u8 (*getapp)(struct net_device *, u8, u16);
+ u8 (*getfeatcfg)(struct net_device *, int, u8 *);
+ u8 (*setfeatcfg)(struct net_device *, int, u8);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct net_device *);
+ u8 (*setdcbx)(struct net_device *, u8);
+
+ /* peer apps */
+ int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *,
+ u16 *);
+ int (*peer_getapptable)(struct net_device *, struct dcb_app *);
+
+ /* CEE peer */
+ int (*cee_peer_getpg) (struct net_device *, struct cee_pg *);
+ int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *);
};
#endif /* __NET_DCBNL_H__ */
diff --git a/include/net/dn.h b/include/net/dn.h
index a514a3cf457..913b73d239f 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -3,6 +3,7 @@
#include <linux/dn.h>
#include <net/sock.h>
+#include <net/flow.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -192,30 +193,34 @@ static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
ethaddr[5] = (__u8)(a >> 8);
}
-static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
+static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
{
- fl->uli_u.dnports.sport = scp->addrloc;
- fl->uli_u.dnports.dport = scp->addrrem;
+ fld->fld_sport = scp->addrloc;
+ fld->fld_dport = scp->addrrem;
}
-extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+void dn_register_sysctl(void);
+void dn_unregister_sysctl(void);
#define DN_MENUVER_ACC 0x01
#define DN_MENUVER_USR 0x02
#define DN_MENUVER_PRX 0x04
#define DN_MENUVER_UIC 0x08
-extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-extern struct sock *dn_find_by_skb(struct sk_buff *skb);
+struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
+struct sock *dn_find_by_skb(struct sk_buff *skb);
#define DN_ASCBUF_LEN 9
-extern char *dn_addr2asc(__u16, char *);
-extern int dn_destroy_timer(struct sock *sk);
+char *dn_addr2asc(__u16, char *);
+int dn_destroy_timer(struct sock *sk);
-extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
-extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
+int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
+ unsigned char type);
+int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
+ unsigned char *type);
-extern void dn_start_slow_timer(struct sock *sk);
-extern void dn_stop_slow_timer(struct sock *sk);
+void dn_start_slow_timer(struct sock *sk);
+void dn_stop_slow_timer(struct sock *sk);
extern __le16 decnet_address;
extern int decnet_debug_level;
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index b9e32db03f2..197886cd7bd 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -9,7 +9,7 @@ struct dn_ifaddr {
struct dn_dev *ifa_dev;
__le16 ifa_local;
__le16 ifa_address;
- __u8 ifa_flags;
+ __u32 ifa_flags;
__u8 ifa_scope;
char ifa_label[IFNAMSIZ];
struct rcu_head rcu;
@@ -148,27 +148,27 @@ struct rtnode_hello_message {
} __packed;
-extern void dn_dev_init(void);
-extern void dn_dev_cleanup(void);
+void dn_dev_init(void);
+void dn_dev_cleanup(void);
-extern int dn_dev_ioctl(unsigned int cmd, void __user *arg);
+int dn_dev_ioctl(unsigned int cmd, void __user *arg);
-extern void dn_dev_devices_off(void);
-extern void dn_dev_devices_on(void);
+void dn_dev_devices_off(void);
+void dn_dev_devices_on(void);
-extern void dn_dev_init_pkt(struct sk_buff *skb);
-extern void dn_dev_veri_pkt(struct sk_buff *skb);
-extern void dn_dev_hello(struct sk_buff *skb);
+void dn_dev_init_pkt(struct sk_buff *skb);
+void dn_dev_veri_pkt(struct sk_buff *skb);
+void dn_dev_hello(struct sk_buff *skb);
-extern void dn_dev_up(struct net_device *);
-extern void dn_dev_down(struct net_device *);
+void dn_dev_up(struct net_device *);
+void dn_dev_down(struct net_device *);
-extern int dn_dev_set_default(struct net_device *dev, int force);
-extern struct net_device *dn_dev_get_default(void);
-extern int dn_dev_bind_default(__le16 *addr);
+int dn_dev_set_default(struct net_device *dev, int force);
+struct net_device *dn_dev_get_default(void);
+int dn_dev_bind_default(__le16 *addr);
-extern int register_dnaddr_notifier(struct notifier_block *nb);
-extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+int register_dnaddr_notifier(struct notifier_block *nb);
+int unregister_dnaddr_notifier(struct notifier_block *nb);
static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
{
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index bbcde3238e5..f2ca135ddcc 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -1,24 +1,9 @@
#ifndef _NET_DN_FIB_H
#define _NET_DN_FIB_H
-/* WARNING: The ordering of these elements must match ordering
- * of RTA_* rtnetlink attribute numbers.
- */
-struct dn_kern_rta {
- void *rta_dst;
- void *rta_src;
- int *rta_iif;
- int *rta_oif;
- void *rta_gw;
- u32 *rta_priority;
- void *rta_prefsrc;
- struct rtattr *rta_mx;
- struct rtattr *rta_mp;
- unsigned char *rta_protoinfo;
- u32 *rta_flow;
- struct rta_cacheinfo *rta_ci;
- struct rta_session *rta_sess;
-};
+#include <linux/netlink.h>
+
+extern const struct nla_policy rtm_dn_policy[];
struct dn_fib_res {
struct fib_rule *r;
@@ -31,7 +16,7 @@ struct dn_fib_res {
struct dn_fib_nh {
struct net_device *nh_dev;
- unsigned nh_flags;
+ unsigned int nh_flags;
unsigned char nh_scope;
int nh_weight;
int nh_power;
@@ -45,7 +30,7 @@ struct dn_fib_info {
int fib_treeref;
atomic_t fib_clntref;
int fib_dead;
- unsigned fib_flags;
+ unsigned int fib_flags;
int fib_protocol;
__le16 fib_prefsrc;
__u32 fib_priority;
@@ -93,12 +78,12 @@ struct dn_fib_table {
u32 n;
int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
- struct dn_kern_rta *rta, struct nlmsghdr *n,
+ struct nlattr *attrs[], struct nlmsghdr *n,
struct netlink_skb_parms *req);
int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
- struct dn_kern_rta *rta, struct nlmsghdr *n,
+ struct nlattr *attrs[], struct nlmsghdr *n,
struct netlink_skb_parms *req);
- int (*lookup)(struct dn_fib_table *t, const struct flowi *fl,
+ int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
struct dn_fib_res *res);
int (*flush)(struct dn_fib_table *t);
int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
@@ -110,42 +95,38 @@ struct dn_fib_table {
/*
* dn_fib.c
*/
-extern void dn_fib_init(void);
-extern void dn_fib_cleanup(void);
-
-extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg);
-extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
- struct dn_kern_rta *rta,
- const struct nlmsghdr *nlh, int *errp);
-extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct flowi *fl,
- struct dn_fib_res *res);
-extern void dn_fib_release_info(struct dn_fib_info *fi);
-extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
-extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct flowi *fl,
- struct dn_fib_res *res);
+void dn_fib_init(void);
+void dn_fib_cleanup(void);
+
+int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
+ struct nlattr *attrs[],
+ const struct nlmsghdr *nlh, int *errp);
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
+ const struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_release_info(struct dn_fib_info *fi);
+void dn_fib_flush(void);
+void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
/*
* dn_tables.c
*/
-extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-extern struct dn_fib_table *dn_fib_empty_table(void);
-extern void dn_fib_table_init(void);
-extern void dn_fib_table_cleanup(void);
+struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
+struct dn_fib_table *dn_fib_empty_table(void);
+void dn_fib_table_init(void);
+void dn_fib_table_cleanup(void);
/*
* dn_rules.c
*/
-extern void dn_fib_rules_init(void);
-extern void dn_fib_rules_cleanup(void);
-extern unsigned dnet_addr_type(__le16 addr);
-extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res);
+void dn_fib_rules_init(void);
+void dn_fib_rules_cleanup(void);
+unsigned int dnet_addr_type(__le16 addr);
+int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
-extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_fib_free_info(struct dn_fib_info *fi);
+void dn_fib_free_info(struct dn_fib_info *fi);
static inline void dn_fib_info_put(struct dn_fib_info *fi)
{
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index 4cb4ae7fb81..fac4e3f4a6d 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -16,12 +16,12 @@ struct dn_neigh {
__u8 priority;
};
-extern void dn_neigh_init(void);
-extern void dn_neigh_cleanup(void);
-extern int dn_neigh_router_hello(struct sk_buff *skb);
-extern int dn_neigh_endnode_hello(struct sk_buff *skb);
-extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+void dn_neigh_init(void);
+void dn_neigh_cleanup(void);
+int dn_neigh_router_hello(struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sk_buff *skb);
+void dn_neigh_pointopoint_hello(struct sk_buff *skb);
+int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
extern struct neigh_table dn_neigh_table;
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index e43a2893f13..3a3e33d1845 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -15,29 +15,32 @@
*******************************************************************************/
/* dn_nsp.c functions prototyping */
-extern void dn_nsp_send_data_ack(struct sock *sk);
-extern void dn_nsp_send_oth_ack(struct sock *sk);
-extern void dn_nsp_delayed_ack(struct sock *sk);
-extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, gfp_t gfp);
-extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
- unsigned short reason);
-extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-extern void dn_nsp_output(struct sock *sk);
-extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
-extern unsigned long dn_nsp_persist(struct sock *sk);
-extern int dn_nsp_xmit_timeout(struct sock *sk);
-
-extern int dn_nsp_rx(struct sk_buff *);
-extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
+void dn_nsp_send_data_ack(struct sock *sk);
+void dn_nsp_send_oth_ack(struct sock *sk);
+void dn_nsp_delayed_ack(struct sock *sk);
+void dn_send_conn_ack(struct sock *sk);
+void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
+void dn_nsp_send_disc(struct sock *sk, unsigned char type,
+ unsigned short reason, gfp_t gfp);
+void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
+ unsigned short reason);
+void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
+void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
+
+void dn_nsp_output(struct sock *sk);
+int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *q, unsigned short acknum);
+void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
+ int oob);
+unsigned long dn_nsp_persist(struct sock *sk);
+int dn_nsp_xmit_timeout(struct sock *sk);
+
+int dn_nsp_rx(struct sk_buff *);
+int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
+ long timeo, int *err);
#define NSP_REASON_OK 0 /* No error */
#define NSP_REASON_NR 1 /* No resources */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 9b185df265f..55df9939bca 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -15,10 +15,13 @@
GNU General Public License for more details.
*******************************************************************************/
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
-extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_rt_cache_flush(int delay);
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
+ struct sock *sk, int flags);
+int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
+void dn_rt_cache_flush(int delay);
+int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
/* Masks for flags field */
#define DN_RT_F_PID 0x07 /* Mask for packet type */
@@ -67,7 +70,9 @@ extern void dn_rt_cache_flush(int delay);
struct dn_route {
struct dst_entry dst;
- struct flowi fl;
+ struct neighbour *n;
+
+ struct flowidn fld;
__le16 rt_saddr;
__le16 rt_daddr;
@@ -76,22 +81,22 @@ struct dn_route {
__le16 rt_src_map;
__le16 rt_dst_map;
- unsigned rt_flags;
- unsigned rt_type;
+ unsigned int rt_flags;
+ unsigned int rt_type;
};
static inline bool dn_is_input_route(struct dn_route *rt)
{
- return rt->fl.iif != 0;
+ return rt->fld.flowidn_iif != 0;
}
static inline bool dn_is_output_route(struct dn_route *rt)
{
- return rt->fl.iif == 0;
+ return rt->fld.flowidn_iif == 0;
}
-extern void dn_route_init(void);
-extern void dn_route_cleanup(void);
+void dn_route_init(void);
+void dn_route_cleanup(void);
#include <net/sock.h>
#include <linux/if_arp.h>
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 839f768f9e3..6efce384451 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -11,6 +11,11 @@
#ifndef __LINUX_NET_DSA_H
#define __LINUX_NET_DSA_H
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
#define DSA_MAX_SWITCHES 4
#define DSA_MAX_PORTS 12
@@ -54,8 +59,148 @@ struct dsa_platform_data {
struct dsa_chip_data *chip;
};
-extern bool dsa_uses_dsa_tags(void *dsa_ptr);
-extern bool dsa_uses_trailer_tags(void *dsa_ptr);
+struct dsa_switch_tree {
+ /*
+ * Configuration data for the platform device that owns
+ * this dsa switch tree instance.
+ */
+ struct dsa_platform_data *pd;
+
+ /*
+ * Reference to network device to use, and which tagging
+ * protocol to use.
+ */
+ struct net_device *master_netdev;
+ __be16 tag_protocol;
+
+ /*
+ * The switch and port to which the CPU is attached.
+ */
+ s8 cpu_switch;
+ s8 cpu_port;
+
+ /*
+ * Link state polling.
+ */
+ int link_poll_needed;
+ struct work_struct link_poll_work;
+ struct timer_list link_poll_timer;
+
+ /*
+ * Data for the individual switch chips.
+ */
+ struct dsa_switch *ds[DSA_MAX_SWITCHES];
+};
+
+struct dsa_switch {
+ /*
+ * Parent switch tree, and switch index.
+ */
+ struct dsa_switch_tree *dst;
+ int index;
+
+ /*
+ * Configuration data for this switch.
+ */
+ struct dsa_chip_data *pd;
+
+ /*
+ * The used switch driver.
+ */
+ struct dsa_switch_driver *drv;
+
+ /*
+ * Reference to mii bus to use.
+ */
+ struct mii_bus *master_mii_bus;
+
+ /*
+ * Slave mii_bus and devices for the individual ports.
+ */
+ u32 dsa_port_mask;
+ u32 phys_port_mask;
+ struct mii_bus *slave_mii_bus;
+ struct net_device *ports[DSA_MAX_PORTS];
+};
+
+static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
+{
+ return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+}
+
+static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+
+ /*
+ * If this is the root switch (i.e. the switch that connects
+ * to the CPU), return the cpu port number on this switch.
+ * Else return the (DSA) port number that connects to the
+ * switch that is one hop closer to the cpu.
+ */
+ if (dst->cpu_switch == ds->index)
+ return dst->cpu_port;
+ else
+ return ds->pd->rtable[dst->cpu_switch];
+}
+
+struct dsa_switch_driver {
+ struct list_head list;
+
+ __be16 tag_protocol;
+ int priv_size;
+
+ /*
+ * Probing and setup.
+ */
+ char *(*probe)(struct mii_bus *bus, int sw_addr);
+ int (*setup)(struct dsa_switch *ds);
+ int (*set_addr)(struct dsa_switch *ds, u8 *addr);
+
+ /*
+ * Access to the switch's PHY registers.
+ */
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port,
+ int regnum, u16 val);
+
+ /*
+ * Link state polling and IRQ handling.
+ */
+ void (*poll_link)(struct dsa_switch *ds);
+
+ /*
+ * ethtool hardware statistics.
+ */
+ void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
+ void (*get_ethtool_stats)(struct dsa_switch *ds,
+ int port, uint64_t *data);
+ int (*get_sset_count)(struct dsa_switch *ds);
+};
+
+void register_switch_driver(struct dsa_switch_driver *type);
+void unregister_switch_driver(struct dsa_switch_driver *type);
+
+static inline void *ds_to_priv(struct dsa_switch *ds)
+{
+ return (void *)(ds + 1);
+}
+
+/*
+ * The original DSA tag format and some other tag formats have no
+ * ethertype, which means that we need to add a little hack to the
+ * networking receive path to make sure that received frames get
+ * the right ->protocol assigned to them when one of those tag
+ * formats is in use.
+ */
+static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
+{
+ return !!(dst->tag_protocol == htons(ETH_P_DSA));
+}
+static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+{
+ return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+}
#endif
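
A worked example of the lookup in dsa_is_cpu_port() and dsa_upstream_port() above, with an invented two-switch topology: switch 1 faces the CPU on its port 5, and switch 0 reaches switch 1 through its DSA port 4, i.e. ds0->pd->rtable[1] == 4.

	dsa_is_cpu_port(ds1, 5)  -> true  (root switch, CPU-facing port)
	dsa_upstream_port(ds1)   -> 5     (dst->cpu_switch == ds1->index)
	dsa_upstream_port(ds0)   -> 4     (the DSA port one hop closer to the CPU)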
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
index 8a8d4e06900..e1ad903a8d6 100644
--- a/include/net/dsfield.h
+++ b/include/net/dsfield.h
@@ -43,11 +43,9 @@ static inline void ipv4_change_dsfield(struct iphdr *iph,__u8 mask,
static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
__u8 value)
{
- __u16 tmp;
+ __be16 *p = (__force __be16 *)ipv6h;
- tmp = ntohs(*(__be16 *) ipv6h);
- tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4);
- *(__be16 *) ipv6h = htons(tmp);
+ *p = (*p & htons((((u16)mask << 4) | 0xf00f))) | htons((u16)value << 4);
}
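
The rewritten ipv6_change_dsfield() folds the old ntohs()/htons() round trip into a single masked operation on the big-endian halfword. Below is a small user-space check that both forms store the same bytes; the sample mask, value and header word are invented.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint8_t mask = 0x3, value = 0xb8;	/* sample DS mask and new DS field */
	uint16_t word = 0x6abc;			/* first 16 bits of an IPv6 header, host order */

	/* old form: convert to host order, mask and merge, convert back */
	uint16_t oldr = (word & (((uint16_t)mask << 4) | 0xf00f)) |
			((uint16_t)value << 4);

	/* new form: mask and merge directly on the big-endian halfword */
	uint16_t be = htons(word);
	uint16_t newr = (be & htons((((uint16_t)mask << 4) | 0xf00f))) |
			htons((uint16_t)value << 4);

	printf("old=0x%04x new=0x%04x\n", oldr, ntohs(newr));
	return 0;
}

The two printed values are equal, so only the number of byte swaps changes, not the stored DS field.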
diff --git a/include/net/dst.h b/include/net/dst.h
index a5bd72646d6..71c60f42be4 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -12,17 +12,11 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
+#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>
-/*
- * 0 - no debugging messages
- * 1 - rare events and bugs (default)
- * 2 - trace mode.
- */
-#define RT_CACHE_DEBUG 0
-
#define DST_GC_MIN (HZ/10)
#define DST_GC_INC (HZ/2)
#define DST_GC_MAX (120*HZ)
@@ -40,51 +34,61 @@ struct dst_entry {
struct rcu_head rcu_head;
struct dst_entry *child;
struct net_device *dev;
- short error;
- short obsolete;
- int flags;
-#define DST_HOST 0x0001
-#define DST_NOXFRM 0x0002
-#define DST_NOPOLICY 0x0004
-#define DST_NOHASH 0x0008
-#define DST_NOCACHE 0x0010
- unsigned long expires;
-
- unsigned short header_len; /* more space at head required */
- unsigned short trailer_len; /* space to reserve at tail */
-
- unsigned int rate_tokens;
- unsigned long rate_last; /* rate limiting for ICMP */
-
+ struct dst_ops *ops;
+ unsigned long _metrics;
+ unsigned long expires;
struct dst_entry *path;
-
- struct neighbour *neighbour;
- struct hh_cache *hh;
+ struct dst_entry *from;
#ifdef CONFIG_XFRM
struct xfrm_state *xfrm;
#else
void *__pad1;
#endif
- int (*input)(struct sk_buff*);
- int (*output)(struct sk_buff*);
+ int (*input)(struct sk_buff *);
+ int (*output)(struct sock *sk, struct sk_buff *skb);
- struct dst_ops *ops;
+ unsigned short flags;
+#define DST_HOST 0x0001
+#define DST_NOXFRM 0x0002
+#define DST_NOPOLICY 0x0004
+#define DST_NOHASH 0x0008
+#define DST_NOCACHE 0x0010
+#define DST_NOCOUNT 0x0020
+#define DST_FAKE_RTABLE 0x0040
+#define DST_XFRM_TUNNEL 0x0080
+#define DST_XFRM_QUEUE 0x0100
+
+ unsigned short pending_confirm;
- u32 metrics[RTAX_MAX];
+ short error;
-#ifdef CONFIG_NET_CLS_ROUTE
+ /* A non-zero value of dst->obsolete forces by-hand validation
+ * of the route entry. Positive values are set by the generic
+ * dst layer to indicate that the entry has been forcefully
+ * destroyed.
+ *
+ * Negative values are used by the implementation layer code to
+ * force invocation of the dst_ops->check() method.
+ */
+ short obsolete;
+#define DST_OBSOLETE_NONE 0
+#define DST_OBSOLETE_DEAD 2
+#define DST_OBSOLETE_FORCE_CHK -1
+#define DST_OBSOLETE_KILL -2
+ unsigned short header_len; /* more space at head required */
+ unsigned short trailer_len; /* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 tclassid;
#else
__u32 __pad2;
#endif
-
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
*/
#ifdef CONFIG_64BIT
- long __pad_to_align_refcnt[1];
+ long __pad_to_align_refcnt[2];
#endif
/*
* __refcnt wants to be on a different cache line from
@@ -101,12 +105,107 @@ struct dst_entry {
};
};
-#ifdef __KERNEL__
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[];
+
+#define DST_METRICS_READ_ONLY 0x1UL
+#define DST_METRICS_FORCE_OVERWRITE 0x2UL
+#define DST_METRICS_FLAGS 0x3UL
+#define __DST_METRICS_PTR(Y) \
+ ((u32 *)((Y) & ~DST_METRICS_FLAGS))
+#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+ return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
+{
+ dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
+}
+
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+ unsigned long val = dst->_metrics;
+ if (!(val & DST_METRICS_READ_ONLY))
+ __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+ unsigned long p = dst->_metrics;
+
+ BUG_ON(!p);
+
+ if (p & DST_METRICS_READ_ONLY)
+ return dst->ops->cow_metrics(dst, p);
+ return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+ const u32 *src_metrics,
+ bool read_only)
+{
+ dst->_metrics = ((unsigned long) src_metrics) |
+ (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+ u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+ if (dst_metrics) {
+ u32 *src_metrics = DST_METRICS_PTR(src);
+
+ memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+ }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+ return DST_METRICS_PTR(dst);
+}
+
+static inline u32
+dst_metric_raw(const struct dst_entry *dst, const int metric)
+{
+ u32 *p = DST_METRICS_PTR(dst);
+
+ return p[metric-1];
+}
static inline u32
-dst_metric(const struct dst_entry *dst, int metric)
+dst_metric(const struct dst_entry *dst, const int metric)
{
- return dst->metrics[metric-1];
+ WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
+ metric == RTAX_ADVMSS ||
+ metric == RTAX_MTU);
+ return dst_metric_raw(dst, metric);
+}
+
+static inline u32
+dst_metric_advmss(const struct dst_entry *dst)
+{
+ u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);
+
+ if (!advmss)
+ advmss = dst->ops->default_advmss(dst);
+
+ return advmss;
+}
+
+static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
+{
+ u32 *p = dst_metrics_write_ptr(dst);
+
+ if (p)
+ p[metric-1] = val;
}
static inline u32
@@ -117,12 +216,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- u32 mtu = dst_metric(dst, RTAX_MTU);
- /*
- * Alexey put it here, so ask him about it :)
- */
- barrier();
- return mtu;
+ return dst->ops->mtu(dst);
}
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
@@ -131,28 +225,20 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
return msecs_to_jiffies(dst_metric(dst, metric));
}
-static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
- unsigned long rtt)
-{
- dst->metrics[metric-1] = jiffies_to_msecs(rtt);
-}
-
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
- /* Yes, _exactly_. This is paranoia. */
- barrier();
return ret;
}
static inline int
-dst_metric_locked(struct dst_entry *dst, int metric)
+dst_metric_locked(const struct dst_entry *dst, int metric)
{
return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
-static inline void dst_hold(struct dst_entry * dst)
+static inline void dst_hold(struct dst_entry *dst)
{
/*
* If your kernel compilation stops here, please check
@@ -175,15 +261,14 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
dst->lastuse = time;
}
-static inline
-struct dst_entry * dst_clone(struct dst_entry * dst)
+static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
atomic_inc(&dst->__refcnt);
return dst;
}
-extern void dst_release(struct dst_entry *dst);
+void dst_release(struct dst_entry *dst);
static inline void refdst_drop(unsigned long refdst)
{
@@ -232,17 +317,24 @@ static inline void skb_dst_force(struct sk_buff *skb)
* __skb_tunnel_rx - prepare skb for rx reinsert
* @skb: buffer
* @dev: tunnel device
+ * @net: netns for packet i/o
*
* After decapsulation, packet is going to re-enter (netif_rx()) our stack,
* so make some cleanups. (no accounting done)
*/
-static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
skb->dev = dev;
- skb->rxhash = 0;
+
+ /*
+	 * Clear hash so that we can recalculate the hash for the
+	 * encapsulated packet, unless we have already determined the hash
+ * over the L4 4-tuple.
+ */
+ skb_clear_hash_if_not_l4(skb);
skb_set_queue_mapping(skb, 0);
- skb_dst_drop(skb);
- nf_reset(skb);
+ skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
/**
@@ -254,12 +346,13 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
* so make some cleanups, and perform accounting.
* Note: this accounting is not SMP safe.
*/
-static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
/* TODO : stats should be SMP safe */
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- __skb_tunnel_rx(skb, dev);
+ __skb_tunnel_rx(skb, dev, net);
}
/* Children define the path of the packet through the
@@ -268,20 +361,25 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
- struct dst_entry *child = skb_dst(skb)->child;
+ struct dst_entry *child = dst_clone(skb_dst(skb)->child);
skb_dst_drop(skb);
return child;
}
-extern int dst_discard(struct sk_buff *skb);
-extern void * dst_alloc(struct dst_ops * ops);
-extern void __dst_free(struct dst_entry * dst);
-extern struct dst_entry *dst_destroy(struct dst_entry * dst);
+int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dst_discard(struct sk_buff *skb)
+{
+ return dst_discard_sk(skb->sk, skb);
+}
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+ int initial_obsolete, unsigned short flags);
+void __dst_free(struct dst_entry *dst);
+struct dst_entry *dst_destroy(struct dst_entry *dst);
-static inline void dst_free(struct dst_entry * dst)
+static inline void dst_free(struct dst_entry *dst)
{
- if (dst->obsolete > 1)
+ if (dst->obsolete > 0)
return;
if (!atomic_read(&dst->__refcnt)) {
dst = dst_destroy(dst);
@@ -299,8 +397,41 @@ static inline void dst_rcu_free(struct rcu_head *head)
static inline void dst_confirm(struct dst_entry *dst)
{
- if (dst)
- neigh_confirm(dst->neighbour);
+ dst->pending_confirm = 1;
+}
+
+static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
+ struct sk_buff *skb)
+{
+ const struct hh_cache *hh;
+
+ if (dst->pending_confirm) {
+ unsigned long now = jiffies;
+
+ dst->pending_confirm = 0;
+ /* avoid dirtying neighbour */
+ if (n->confirmed != now)
+ n->confirmed = now;
+ }
+
+ hh = &n->hh;
+ if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ return neigh_hh_output(hh, skb);
+ else
+ return n->output(n, skb);
+}
+
+static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+{
+ struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
+ return IS_ERR(n) ? NULL : n;
+}
+
+static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
+ struct sk_buff *skb)
+{
+ struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
+ return IS_ERR(n) ? NULL : n;
}
static inline void dst_link_failure(struct sk_buff *skb)
@@ -322,9 +453,13 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
}
/* Output packet to network from transport. */
+static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
+{
+ return skb_dst(skb)->output(sk, skb);
+}
static inline int dst_output(struct sk_buff *skb)
{
- return skb_dst(skb)->output(skb);
+ return dst_output_sk(skb->sk, skb);
}
/* Input packet from network to transport. */
@@ -340,32 +475,38 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
-extern void dst_init(void);
+void dst_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
- XFRM_LOOKUP_WAIT = 1 << 0,
- XFRM_LOOKUP_ICMP = 1 << 1,
+ XFRM_LOOKUP_ICMP = 1 << 0,
};
struct flowi;
#ifndef CONFIG_XFRM
-static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags)
+static inline struct dst_entry *xfrm_lookup(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags)
{
- return 0;
+ return dst_orig;
}
-static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags)
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
- return 0;
+ return NULL;
}
+
#else
-extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags);
-extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
- struct flowi *fl, struct sock *sk, int flags);
-#endif
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+ return dst->xfrm;
+}
#endif
#endif /* _NET_DST_H */
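
dst->_metrics packs a pointer and flag bits into one word: the metrics array is word-aligned, so bit 0 (DST_METRICS_READ_ONLY) can mark a shared, read-only array such as dst_default_metrics[], bit 1 (DST_METRICS_FORCE_OVERWRITE) can request replacement, and DST_METRICS_PTR() masks both bits off before dereferencing. A minimal user-space sketch of the tagging trick; the names and the metric count are placeholders, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define RTAX_MAX		16	/* placeholder metric count */
#define METRICS_READ_ONLY	0x1UL
#define METRICS_FLAGS		0x3UL
#define METRICS_PTR(v)		((const uint32_t *)((v) & ~METRICS_FLAGS))

static const uint32_t default_metrics[RTAX_MAX];	/* shared all-zero defaults */

int main(void)
{
	/* a freshly created entry points at the shared defaults, marked read-only */
	unsigned long m = (unsigned long)default_metrics | METRICS_READ_ONLY;

	printf("read-only: %s\n", (m & METRICS_READ_ONLY) ? "yes" : "no");
	printf("metric[0]: %u\n", (unsigned)METRICS_PTR(m)[0]);

	/* a writer would first have to copy the array (cow_metrics) and store
	 * the private copy without the read-only bit set */
	return 0;
}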
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 51665b3461b..2f26dfb8450 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -8,21 +8,31 @@ struct dst_entry;
struct kmem_cachep;
struct net_device;
struct sk_buff;
+struct sock;
struct dst_ops {
unsigned short family;
__be16 protocol;
- unsigned gc_thresh;
+ unsigned int gc_thresh;
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
+ unsigned int (*default_advmss)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
+ u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
- void (*update_pmtu)(struct dst_entry *dst, u32 mtu);
+ void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+ void (*redirect)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
int (*local_out)(struct sk_buff *skb);
+ struct neighbour * (*neigh_lookup)(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
struct kmem_cache *kmem_cachep;
diff --git a/include/net/esp.h b/include/net/esp.h
index d58451331db..a43be85aedc 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -3,18 +3,6 @@
#include <linux/skbuff.h>
-struct crypto_aead;
-
-struct esp_data {
- /* 0..255 */
- int padlen;
-
- /* Confidentiality & Integrity */
- struct crypto_aead *aead;
-};
-
-extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
-
struct ip_esp_hdr;
static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 96f3789b27b..2a2d6bb34eb 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -16,6 +16,7 @@
struct ethoc_platform_data {
u8 hwaddr[IFHWADDRLEN];
s8 phy_id;
+ u32 eth_clkfreq;
};
#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 075f1e3a0fe..e584de16e4c 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -10,21 +10,25 @@
struct fib_rule {
struct list_head list;
- atomic_t refcnt;
int iifindex;
int oifindex;
u32 mark;
u32 mark_mask;
- u32 pref;
u32 flags;
u32 table;
u8 action;
+ /* 3 bytes hole, try to use */
u32 target;
struct fib_rule __rcu *ctarget;
+ struct net *fr_net;
+
+ atomic_t refcnt;
+ u32 pref;
+ int suppress_ifgroup;
+ int suppress_prefixlen;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct rcu_head rcu;
- struct net * fr_net;
};
struct fib_lookup_arg {
@@ -46,12 +50,15 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
+ bool (*suppress)(struct fib_rule *,
+ struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
int (*configure)(struct fib_rule *,
struct sk_buff *,
struct fib_rule_hdr *,
struct nlattr **);
+ void (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
struct nlattr **);
@@ -79,6 +86,8 @@ struct fib_rules_ops {
[FRA_FWMARK] = { .type = NLA_U32 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
[FRA_GOTO] = { .type = NLA_U32 }
static inline void fib_rule_get(struct fib_rule *rule)
@@ -106,14 +115,13 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
return frh->table;
}
-extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
-extern void fib_rules_unregister(struct fib_rules_ops *);
+struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
+ struct net *);
+void fib_rules_unregister(struct fib_rules_ops *);
-extern int fib_rules_lookup(struct fib_rules_ops *,
- struct flowi *, int flags,
- struct fib_lookup_arg *);
-extern int fib_default_rule_add(struct fib_rules_ops *,
- u32 pref, u32 table,
- u32 flags);
-extern u32 fib_default_rule_pref(struct fib_rules_ops *ops);
+int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
+ struct fib_lookup_arg *);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
+ u32 flags);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
diff --git a/include/net/firewire.h b/include/net/firewire.h
new file mode 100644
index 00000000000..31bcbfe7a22
--- /dev/null
+++ b/include/net/firewire.h
@@ -0,0 +1,25 @@
+#ifndef _NET_FIREWIRE_H
+#define _NET_FIREWIRE_H
+
+/* Pseudo L2 address */
+#define FWNET_ALEN 16
+union fwnet_hwaddr {
+ u8 u[FWNET_ALEN];
+	/* "Hardware address" defined in RFC2734/RFC3146 */
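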
+ struct {
+ __be64 uniq_id; /* EUI-64 */
+ u8 max_rec; /* max packet size */
+ u8 sspd; /* max speed */
+ __be16 fifo_hi; /* hi 16bits of FIFO addr */
+ __be32 fifo_lo; /* lo 32bits of FIFO addr */
+ } __packed uc;
+};
+
+/* Pseudo L2 Header */
+#define FWNET_HLEN 18
+struct fwnet_header {
+ u8 h_dest[FWNET_ALEN]; /* destination address */
+ __be16 h_proto; /* packet type ID field */
+} __packed;
+
+#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 7196e6864b8..8109a159d1b 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -7,82 +7,199 @@
#ifndef _NET_FLOW_H
#define _NET_FLOW_H
+#include <linux/socket.h>
#include <linux/in6.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
-struct flowi {
- int oif;
- int iif;
- __u32 mark;
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+#define LOOPBACK_IFINDEX 1
+
+struct flowi_common {
+ int flowic_oif;
+ int flowic_iif;
+ __u32 flowic_mark;
+ __u8 flowic_tos;
+ __u8 flowic_scope;
+ __u8 flowic_proto;
+ __u8 flowic_flags;
+#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_KNOWN_NH 0x02
+ __u32 flowic_secid;
+};
+
+union flowi_uli {
+ struct {
+ __be16 dport;
+ __be16 sport;
+ } ports;
+
+ struct {
+ __u8 type;
+ __u8 code;
+ } icmpt;
+
+ struct {
+ __le16 dport;
+ __le16 sport;
+ } dnports;
+
+ __be32 spi;
+ __be32 gre_key;
+
+ struct {
+ __u8 type;
+ } mht;
+};
+
+struct flowi4 {
+ struct flowi_common __fl_common;
+#define flowi4_oif __fl_common.flowic_oif
+#define flowi4_iif __fl_common.flowic_iif
+#define flowi4_mark __fl_common.flowic_mark
+#define flowi4_tos __fl_common.flowic_tos
+#define flowi4_scope __fl_common.flowic_scope
+#define flowi4_proto __fl_common.flowic_proto
+#define flowi4_flags __fl_common.flowic_flags
+#define flowi4_secid __fl_common.flowic_secid
+
+ /* (saddr,daddr) must be grouped, same order as in IP header */
+ __be32 saddr;
+ __be32 daddr;
+
+ union flowi_uli uli;
+#define fl4_sport uli.ports.sport
+#define fl4_dport uli.ports.dport
+#define fl4_icmp_type uli.icmpt.type
+#define fl4_icmp_code uli.icmpt.code
+#define fl4_ipsec_spi uli.spi
+#define fl4_mh_type uli.mht.type
+#define fl4_gre_key uli.gre_key
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
+ __u32 mark, __u8 tos, __u8 scope,
+ __u8 proto, __u8 flags,
+ __be32 daddr, __be32 saddr,
+ __be16 dport, __be16 sport)
+{
+ fl4->flowi4_oif = oif;
+ fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ fl4->flowi4_mark = mark;
+ fl4->flowi4_tos = tos;
+ fl4->flowi4_scope = scope;
+ fl4->flowi4_proto = proto;
+ fl4->flowi4_flags = flags;
+ fl4->flowi4_secid = 0;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
+}
+
+/* Reset some input parameters after previous lookup */
+static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
+ __be32 daddr, __be32 saddr)
+{
+ fl4->flowi4_oif = oif;
+ fl4->flowi4_tos = tos;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+}
+
+
+struct flowi6 {
+ struct flowi_common __fl_common;
+#define flowi6_oif __fl_common.flowic_oif
+#define flowi6_iif __fl_common.flowic_iif
+#define flowi6_mark __fl_common.flowic_mark
+#define flowi6_tos __fl_common.flowic_tos
+#define flowi6_scope __fl_common.flowic_scope
+#define flowi6_proto __fl_common.flowic_proto
+#define flowi6_flags __fl_common.flowic_flags
+#define flowi6_secid __fl_common.flowic_secid
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ __be32 flowlabel;
+ union flowi_uli uli;
+#define fl6_sport uli.ports.sport
+#define fl6_dport uli.ports.dport
+#define fl6_icmp_type uli.icmpt.type
+#define fl6_icmp_code uli.icmpt.code
+#define fl6_ipsec_spi uli.spi
+#define fl6_mh_type uli.mht.type
+#define fl6_gre_key uli.gre_key
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+struct flowidn {
+ struct flowi_common __fl_common;
+#define flowidn_oif __fl_common.flowic_oif
+#define flowidn_iif __fl_common.flowic_iif
+#define flowidn_mark __fl_common.flowic_mark
+#define flowidn_scope __fl_common.flowic_scope
+#define flowidn_proto __fl_common.flowic_proto
+#define flowidn_flags __fl_common.flowic_flags
+ __le16 daddr;
+ __le16 saddr;
+ union flowi_uli uli;
+#define fld_sport uli.ports.sport
+#define fld_dport uli.ports.dport
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
+
+struct flowi {
union {
- struct {
- __be32 daddr;
- __be32 saddr;
- __u8 tos;
- __u8 scope;
- } ip4_u;
-
- struct {
- struct in6_addr daddr;
- struct in6_addr saddr;
- __be32 flowlabel;
- } ip6_u;
-
- struct {
- __le16 daddr;
- __le16 saddr;
- __u8 scope;
- } dn_u;
- } nl_u;
-#define fld_dst nl_u.dn_u.daddr
-#define fld_src nl_u.dn_u.saddr
-#define fld_scope nl_u.dn_u.scope
-#define fl6_dst nl_u.ip6_u.daddr
-#define fl6_src nl_u.ip6_u.saddr
-#define fl6_flowlabel nl_u.ip6_u.flowlabel
-#define fl4_dst nl_u.ip4_u.daddr
-#define fl4_src nl_u.ip4_u.saddr
-#define fl4_tos nl_u.ip4_u.tos
-#define fl4_scope nl_u.ip4_u.scope
-
- __u8 proto;
- __u8 flags;
-#define FLOWI_FLAG_ANYSRC 0x01
-#define FLOWI_FLAG_MATCH_ANY_IIF 0x02
- union {
- struct {
- __be16 sport;
- __be16 dport;
- } ports;
-
- struct {
- __u8 type;
- __u8 code;
- } icmpt;
-
- struct {
- __le16 sport;
- __le16 dport;
- } dnports;
-
- __be32 spi;
- __be32 gre_key;
-
- struct {
- __u8 type;
- } mht;
- } uli_u;
-#define fl_ip_sport uli_u.ports.sport
-#define fl_ip_dport uli_u.ports.dport
-#define fl_icmp_type uli_u.icmpt.type
-#define fl_icmp_code uli_u.icmpt.code
-#define fl_ipsec_spi uli_u.spi
-#define fl_mh_type uli_u.mht.type
-#define fl_gre_key uli_u.gre_key
- __u32 secid; /* used by xfrm; see secid.txt */
+ struct flowi_common __fl_common;
+ struct flowi4 ip4;
+ struct flowi6 ip6;
+ struct flowidn dn;
+ } u;
+#define flowi_oif u.__fl_common.flowic_oif
+#define flowi_iif u.__fl_common.flowic_iif
+#define flowi_mark u.__fl_common.flowic_mark
+#define flowi_tos u.__fl_common.flowic_tos
+#define flowi_scope u.__fl_common.flowic_scope
+#define flowi_proto u.__fl_common.flowic_proto
+#define flowi_flags u.__fl_common.flowic_flags
+#define flowi_secid u.__fl_common.flowic_secid
} __attribute__((__aligned__(BITS_PER_LONG/8)));
+static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
+{
+ return container_of(fl4, struct flowi, u.ip4);
+}
+
+static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
+{
+ return container_of(fl6, struct flowi, u.ip6);
+}
+
+static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+{
+ return container_of(fldn, struct flowi, u.dn);
+}
+
+typedef unsigned long flow_compare_t;
+
+static inline size_t flow_key_size(u16 family)
+{
+ switch (family) {
+ case AF_INET:
+ BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
+ return sizeof(struct flowi4) / sizeof(flow_compare_t);
+ case AF_INET6:
+ BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
+ return sizeof(struct flowi6) / sizeof(flow_compare_t);
+ case AF_DECnet:
+ BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
+ return sizeof(struct flowidn) / sizeof(flow_compare_t);
+ }
+ return 0;
+}
+
#define FLOW_DIR_IN 0
#define FLOW_DIR_OUT 1
#define FLOW_DIR_FWD 2
@@ -102,20 +219,18 @@ struct flow_cache_ops {
};
typedef struct flow_cache_object *(*flow_resolve_t)(
- struct net *net, struct flowi *key, u16 family,
+ struct net *net, const struct flowi *key, u16 family,
u8 dir, struct flow_cache_object *oldobj, void *ctx);
-extern struct flow_cache_object *flow_cache_lookup(
- struct net *net, struct flowi *key, u16 family,
- u8 dir, flow_resolve_t resolver, void *ctx);
+struct flow_cache_object *flow_cache_lookup(struct net *net,
+ const struct flowi *key, u16 family,
+ u8 dir, flow_resolve_t resolver,
+ void *ctx);
+int flow_cache_init(struct net *net);
+void flow_cache_fini(struct net *net);
-extern void flow_cache_flush(void);
+void flow_cache_flush(struct net *net);
+void flow_cache_flush_deferred(struct net *net);
extern atomic_t flow_cache_genid;
-static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
-{
- return (fl1->proto == fl2->proto &&
- !memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
-}
-
#endif
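
flow_key_size() above reports each key's length in flow_compare_t machine words, and the BUILD_BUG_ON()s guarantee the struct sizes divide evenly, so the flow cache can compare two keys with a plain word loop. A hedged user-space sketch of that comparison pattern; the helper name and sample keys are invented.

#include <stddef.h>
#include <stdio.h>

typedef unsigned long flow_compare_t;

/* compare two keys of 'words' machine words, the way the flow cache does */
static int flow_key_equal(const flow_compare_t *k1, const flow_compare_t *k2,
			  size_t words)
{
	while (words--)
		if (*k1++ != *k2++)
			return 0;
	return 1;
}

int main(void)
{
	flow_compare_t a[4] = { 1, 2, 3, 4 };
	flow_compare_t b[4] = { 1, 2, 3, 5 };

	printf("a == a: %d\n", flow_key_equal(a, a, 4));
	printf("a == b: %d\n", flow_key_equal(a, b, 4));
	return 0;
}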
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
new file mode 100644
index 00000000000..7e64bd8bbda
--- /dev/null
+++ b/include/net/flow_keys.h
@@ -0,0 +1,18 @@
+#ifndef _NET_FLOW_KEYS_H
+#define _NET_FLOW_KEYS_H
+
+struct flow_keys {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ u16 thoff;
+ u8 ip_proto;
+};
+
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+#endif
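
A hedged in-kernel fragment showing how a caller might use the new dissector to log a packet's 5-tuple; the function is invented for illustration and assumes the usual skbuff/printk headers. In struct flow_keys, port16[0] is the source port and port16[1] the destination port, matching their order in the TCP/UDP header.

#include <linux/skbuff.h>
#include <net/flow_keys.h>

static void log_flow(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;		/* not a dissectable L3/L4 packet */

	pr_info("proto %u  %pI4:%u -> %pI4:%u\n", keys.ip_proto,
		&keys.src, ntohs(keys.port16[0]),
		&keys.dst, ntohs(keys.port16[1]));
}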
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
new file mode 100644
index 00000000000..c8f665ec6e0
--- /dev/null
+++ b/include/net/flowcache.h
@@ -0,0 +1,25 @@
+#ifndef _NET_FLOWCACHE_H
+#define _NET_FLOWCACHE_H
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+
+struct flow_cache_percpu {
+ struct hlist_head *hash_table;
+ int hash_count;
+ u32 hash_rnd;
+ int hash_rnd_recalc;
+ struct tasklet_struct flush_tasklet;
+};
+
+struct flow_cache {
+ u32 hash_shift;
+ struct flow_cache_percpu __percpu *percpu;
+ struct notifier_block hotcpu_notifier;
+ int low_watermark;
+ int high_watermark;
+ struct timer_list rnd_timer;
+};
+#endif /* _NET_FLOWCACHE_H */
diff --git a/include/net/garp.h b/include/net/garp.h
index f4c295984c4..abf33bbd2e6 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -104,25 +104,26 @@ struct garp_applicant {
struct sk_buff_head queue;
struct sk_buff *pdu;
struct rb_root gid;
+ struct rcu_head rcu;
};
struct garp_port {
struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1];
+ struct rcu_head rcu;
};
-extern int garp_register_application(struct garp_application *app);
-extern void garp_unregister_application(struct garp_application *app);
+int garp_register_application(struct garp_application *app);
+void garp_unregister_application(struct garp_application *app);
-extern int garp_init_applicant(struct net_device *dev,
- struct garp_application *app);
-extern void garp_uninit_applicant(struct net_device *dev,
- struct garp_application *app);
+int garp_init_applicant(struct net_device *dev, struct garp_application *app);
+void garp_uninit_applicant(struct net_device *dev,
+ struct garp_application *app);
-extern int garp_request_join(const struct net_device *dev,
- const struct garp_application *app,
- const void *data, u8 len, u8 type);
-extern void garp_request_leave(const struct net_device *dev,
- const struct garp_application *app,
- const void *data, u8 len, u8 type);
+int garp_request_join(const struct net_device *dev,
+ const struct garp_application *app, const void *data,
+ u8 len, u8 type);
+void garp_request_leave(const struct net_device *dev,
+ const struct garp_application *app,
+ const void *data, u8 len, u8 type);
#endif /* _NET_GARP_H */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index fa157712e98..ea4271dceff 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -11,7 +11,7 @@ struct gnet_dump {
struct sk_buff * skb;
struct nlattr * tail;
- /* Backward compatability */
+ /* Backward compatibility */
int compat_tc_stats;
int compat_xstats;
void * xstats;
@@ -19,32 +19,31 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
-extern int gnet_stats_start_copy(struct sk_buff *skb, int type,
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+ struct gnet_dump *d);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+ int tc_stats_type, int xstats_type,
spinlock_t *lock, struct gnet_dump *d);
-extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
- int tc_stats_type,int xstats_type,
- spinlock_t *lock, struct gnet_dump *d);
-
-extern int gnet_stats_copy_basic(struct gnet_dump *d,
- struct gnet_stats_basic_packed *b);
-extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
- const struct gnet_stats_basic_packed *b,
- struct gnet_stats_rate_est *r);
-extern int gnet_stats_copy_queue(struct gnet_dump *d,
- struct gnet_stats_queue *q);
-extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
-
-extern int gnet_stats_finish_copy(struct gnet_dump *d);
-
-extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est *rate_est);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+ const struct gnet_stats_basic_packed *b,
+ struct gnet_stats_rate_est64 *r);
+int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+int gnet_stats_finish_copy(struct gnet_dump *d);
+
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock, struct nlattr *opt);
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est);
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock, struct nlattr *opt);
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+ const struct gnet_stats_rate_est64 *rate_est);
#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8a64b811a39..93695f0e22a 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -5,19 +5,14 @@
#include <net/netlink.h>
#include <net/net_namespace.h>
+#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
- * @id: multicast group ID, assigned by the core, to use with
- * genlmsg_multicast().
- * @list: list entry for linking
- * @family: pointer to family, need not be set before registering
*/
struct genl_multicast_group {
- struct genl_family *family; /* private */
- struct list_head list; /* private */
char name[GENL_NAMSIZ];
- u32 id;
};
struct genl_ops;
@@ -37,9 +32,12 @@ struct genl_info;
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
* @attrbuf: buffer to store parsed attributes
- * @ops_list: list of all assigned operations
* @family_list: family list
- * @mcast_groups: multicast groups list
+ * @mcgrps: multicast groups used by this family (private)
+ * @n_mcgrps: number of multicast groups (private)
+ * @mcgrp_offset: starting number of multicast group IDs in this family
+ * @ops: the operations supported by this family (private)
+ * @n_ops: number of operations supported by this family (private)
*/
struct genl_family {
unsigned int id;
@@ -48,32 +46,38 @@ struct genl_family {
unsigned int version;
unsigned int maxattr;
bool netnsok;
- int (*pre_doit)(struct genl_ops *ops,
+ bool parallel_ops;
+ int (*pre_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
- void (*post_doit)(struct genl_ops *ops,
+ void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
struct nlattr ** attrbuf; /* private */
- struct list_head ops_list; /* private */
+ const struct genl_ops * ops; /* private */
+ const struct genl_multicast_group *mcgrps; /* private */
+ unsigned int n_ops; /* private */
+ unsigned int n_mcgrps; /* private */
+ unsigned int mcgrp_offset; /* private */
struct list_head family_list; /* private */
- struct list_head mcast_groups; /* private */
+ struct module *module;
};
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
- * @snd_pid: netlink pid of sender
+ * @snd_portid: netlink portid of sender
* @nlhdr: netlink message header
* @genlhdr: generic netlink message header
* @userhdr: user specific header
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
+ * @dst_sk: destination socket
*/
struct genl_info {
u32 snd_seq;
- u32 snd_pid;
+ u32 snd_portid;
struct nlmsghdr * nlhdr;
struct genlmsghdr * genlhdr;
void * userhdr;
@@ -82,6 +86,7 @@ struct genl_info {
struct net * _net;
#endif
void * user_ptr[2];
+ struct sock * dst_sk;
};
static inline struct net *genl_info_net(struct genl_info *info)
@@ -106,57 +111,109 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
* @ops_list: operations list
*/
struct genl_ops {
- u8 cmd;
- u8 internal_flags;
- unsigned int flags;
const struct nla_policy *policy;
int (*doit)(struct sk_buff *skb,
struct genl_info *info);
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
- struct list_head ops_list;
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
};
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
- struct genl_ops *ops, size_t n_ops);
-extern int genl_unregister_family(struct genl_family *family);
-extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_register_mc_group(struct genl_family *family,
- struct genl_multicast_group *grp);
-extern void genl_unregister_mc_group(struct genl_family *family,
- struct genl_multicast_group *grp);
+int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+ family->module = THIS_MODULE;
+ return __genl_register_family(family);
+}
/**
- * genlmsg_put - Add generic netlink header to netlink message
- * @skb: socket buffer holding the message
- * @pid: netlink pid the message is addressed to
- * @seq: sequence number (usually the one of the sender)
+ * genl_register_family_with_ops - register a generic netlink family with ops
* @family: generic netlink family
- * @flags netlink message flags
- * @cmd: generic netlink command
+ * @ops: operations to be registered
+ * @n_ops: number of elements to register
*
- * Returns pointer to user specific header
+ * Registers the specified family and operations from the specified table.
+ * Only one family may be registered with the same family name or identifier.
+ *
+ * The family id may equal GENL_ID_GENERATE, causing a unique id to
+ * be automatically generated and assigned.
+ *
+ * Either a doit or dumpit callback must be specified for every registered
+ * operation or the function will fail. Only one operation structure per
+ * command identifier may be registered.
+ *
+ * See include/net/genetlink.h for more documentation on the operations
+ * structure.
+ *
+ * Return 0 on success or a negative error code.
*/
-static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
- struct genl_family *family, int flags, u8 cmd)
+static inline int
+_genl_register_family_with_ops_grps(struct genl_family *family,
+ const struct genl_ops *ops, size_t n_ops,
+ const struct genl_multicast_group *mcgrps,
+ size_t n_mcgrps)
{
- struct nlmsghdr *nlh;
- struct genlmsghdr *hdr;
+ family->module = THIS_MODULE;
+ family->ops = ops;
+ family->n_ops = n_ops;
+ family->mcgrps = mcgrps;
+ family->n_mcgrps = n_mcgrps;
+ return __genl_register_family(family);
+}
- nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
- family->hdrsize, flags);
- if (nlh == NULL)
- return NULL;
+#define genl_register_family_with_ops(family, ops) \
+ _genl_register_family_with_ops_grps((family), \
+ (ops), ARRAY_SIZE(ops), \
+ NULL, 0)
+#define genl_register_family_with_ops_groups(family, ops, grps) \
+ _genl_register_family_with_ops_grps((family), \
+ (ops), ARRAY_SIZE(ops), \
+ (grps), ARRAY_SIZE(grps))
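
With the macro form above, the ops table is passed directly and its length is derived with ARRAY_SIZE(). A hedged registration sketch (the DOC_* constants, doc_cmd_get() and the family name are placeholders, not part of this patch):

static const struct nla_policy doc_policy[DOC_ATTR_MAX + 1] = {
	[DOC_ATTR_NAME] = { .type = NLA_NUL_STRING },
};

static const struct genl_ops doc_ops[] = {
	{
		.cmd	= DOC_CMD_GET,
		.doit	= doc_cmd_get,
		.policy	= doc_policy,
	},
};

static struct genl_family doc_family = {
	.id		= GENL_ID_GENERATE,
	.name		= "doc_example",
	.version	= 1,
	.maxattr	= DOC_ATTR_MAX,
};

static int __init doc_init(void)
{
	return genl_register_family_with_ops(&doc_family, doc_ops);
}
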
- hdr = nlmsg_data(nlh);
- hdr->cmd = cmd;
- hdr->version = family->version;
- hdr->reserved = 0;
+int genl_unregister_family(struct genl_family *family);
+void genl_notify(struct genl_family *family,
+ struct sk_buff *skb, struct net *net, u32 portid,
+ u32 group, struct nlmsghdr *nlh, gfp_t flags);
- return (char *) hdr + GENL_HDRLEN;
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+ gfp_t flags);
+void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
+ struct genl_family *family, int flags, u8 cmd);
+
+/**
+ * genlmsg_nlhdr - Obtain netlink header from user specified header
+ * @user_hdr: user header as returned from genlmsg_put()
+ * @family: generic netlink family
+ *
+ * Returns pointer to netlink header.
+ */
+static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr,
+ struct genl_family *family)
+{
+ return (struct nlmsghdr *)((char *)user_hdr -
+ family->hdrsize -
+ GENL_HDRLEN -
+ NLMSG_HDRLEN);
+}
+
+/**
+ * genl_dump_check_consistent - check if sequence is consistent and advertise if not
+ * @cb: netlink callback structure that stores the sequence number
+ * @user_hdr: user header as returned from genlmsg_put()
+ * @family: generic netlink family
+ *
+ * Cf. nl_dump_check_consistent(), this just provides a wrapper to make it
+ * simpler to use with generic netlink.
+ */
+static inline void genl_dump_check_consistent(struct netlink_callback *cb,
+ void *user_hdr,
+ struct genl_family *family)
+{
+ nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr, family));
}
/**
@@ -174,7 +231,7 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
struct genl_family *family,
int flags, u8 cmd)
{
- return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
+ return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
flags, cmd);
}
@@ -195,56 +252,67 @@ static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
*/
static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
{
- nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+ if (hdr)
+ nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**
* genlmsg_multicast_netns - multicast a netlink message to a specific netns
+ * @family: the generic netlink family
* @net: the net namespace
* @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
- * @group: multicast group id
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
* @flags: allocation flags
*/
-static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb,
- u32 pid, unsigned int group, gfp_t flags)
+static inline int genlmsg_multicast_netns(struct genl_family *family,
+ struct net *net, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags)
{
- return nlmsg_multicast(net->genl_sock, skb, pid, group, flags);
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
}
/**
* genlmsg_multicast - multicast a netlink message to the default netns
+ * @family: the generic netlink family
* @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
- * @group: multicast group id
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
* @flags: allocation flags
*/
-static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid,
+static inline int genlmsg_multicast(struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags)
{
- return genlmsg_multicast_netns(&init_net, skb, pid, group, flags);
+ return genlmsg_multicast_netns(family, &init_net, skb,
+ portid, group, flags);
}
/**
* genlmsg_multicast_allns - multicast a netlink message to all net namespaces
+ * @family: the generic netlink family
* @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
- * @group: multicast group id
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
* @flags: allocation flags
*
* This function must hold the RTNL or rcu_read_lock().
*/
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid,
+int genlmsg_multicast_allns(struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags);
/**
* genlmsg_unicast - unicast a netlink message
* @skb: netlink message as socket buffer
- * @pid: netlink pid of the destination socket
+ * @portid: netlink portid of the destination socket
*/
-static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
+static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
{
- return nlmsg_unicast(net->genl_sock, skb, pid);
+ return nlmsg_unicast(net->genl_sock, skb, portid);
}
/**
@@ -254,12 +322,12 @@ static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
*/
static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
{
- return genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
}
/**
* gennlmsg_data - head of message payload
- * @gnlh: genetlink messsage header
+ * @gnlh: genetlink message header
*/
static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
{
@@ -305,5 +373,25 @@ static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
return nlmsg_new(genlmsg_total_size(payload), flags);
}
+/**
+ * genl_set_err - report error to genetlink broadcast listeners
+ * @family: the generic netlink family
+ * @net: the network namespace to report the error to
+ * @portid: the PORTID of a process that we want to skip (if any)
+ * @group: the broadcast group that will notice the error
+ * (this is the offset of the multicast group in the groups array)
+ * @code: error code, must be negative (as usual in kernelspace)
+ *
+ * This function returns the number of broadcast listeners that have set the
+ * NETLINK_RECV_NO_ENOBUFS socket option.
+ */
+static inline int genl_set_err(struct genl_family *family, struct net *net,
+ u32 portid, u32 group, int code)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return netlink_set_err(net->genl_sock, portid, group, code);
+}
#endif /* __NET_GENERIC_NETLINK_H */
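
Multicast groups are now declared as an array at registration time and addressed by their offset in that array rather than by a global id. A sketch building on the placeholder family above (registration switches to the _groups variant):

enum doc_mcgrp { DOC_MCGRP_EVENTS };

static const struct genl_multicast_group doc_mcgrps[] = {
	[DOC_MCGRP_EVENTS] = { .name = "events" },
};

static int __init doc_init(void)
{
	return genl_register_family_with_ops_groups(&doc_family, doc_ops,
						    doc_mcgrps);
}

static void doc_notify(struct sk_buff *msg)
{
	/* the group argument is an offset into doc_mcgrps[],
	 * not a global multicast group id */
	genlmsg_multicast(&doc_family, msg, 0, DOC_MCGRP_EVENTS, GFP_KERNEL);
}
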
diff --git a/include/net/gre.h b/include/net/gre.h
index 82665474bcb..b5318201874 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -2,17 +2,103 @@
#define __LINUX_GRE_H
#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
#define GREPROTO_CISCO 0
#define GREPROTO_PPTP 1
#define GREPROTO_MAX 2
+#define GRE_IP_PROTO_MAX 2
struct gre_protocol {
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
};
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
int gre_add_protocol(const struct gre_protocol *proto, u8 version);
int gre_del_protocol(const struct gre_protocol *proto, u8 version);
+struct gre_cisco_protocol {
+ int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
+ int (*err_handler)(struct sk_buff *skb, u32 info,
+ const struct tnl_ptk_info *tpi);
+ u8 priority;
+};
+
+int gre_cisco_register(struct gre_cisco_protocol *proto);
+int gre_cisco_unregister(struct gre_cisco_protocol *proto);
+
+void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+ int hdr_len);
+
+static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+ bool csum)
+{
+ return iptunnel_handle_offloads(skb, csum,
+ csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+
+
+static inline int ip_gre_calc_hlen(__be16 o_flags)
+{
+ int addend = 4;
+
+ if (o_flags&TUNNEL_CSUM)
+ addend += 4;
+ if (o_flags&TUNNEL_KEY)
+ addend += 4;
+ if (o_flags&TUNNEL_SEQ)
+ addend += 4;
+ return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+ __be16 tflags = 0;
+
+ if (flags & GRE_CSUM)
+ tflags |= TUNNEL_CSUM;
+ if (flags & GRE_ROUTING)
+ tflags |= TUNNEL_ROUTING;
+ if (flags & GRE_KEY)
+ tflags |= TUNNEL_KEY;
+ if (flags & GRE_SEQ)
+ tflags |= TUNNEL_SEQ;
+ if (flags & GRE_STRICT)
+ tflags |= TUNNEL_STRICT;
+ if (flags & GRE_REC)
+ tflags |= TUNNEL_REC;
+ if (flags & GRE_VERSION)
+ tflags |= TUNNEL_VERSION;
+
+ return tflags;
+}
+
+static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+ __be16 flags = 0;
+
+ if (tflags & TUNNEL_CSUM)
+ flags |= GRE_CSUM;
+ if (tflags & TUNNEL_ROUTING)
+ flags |= GRE_ROUTING;
+ if (tflags & TUNNEL_KEY)
+ flags |= GRE_KEY;
+ if (tflags & TUNNEL_SEQ)
+ flags |= GRE_SEQ;
+ if (tflags & TUNNEL_STRICT)
+ flags |= GRE_STRICT;
+ if (tflags & TUNNEL_REC)
+ flags |= GRE_REC;
+ if (tflags & TUNNEL_VERSION)
+ flags |= GRE_VERSION;
+
+ return flags;
+}
+
#endif
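
A short worked example of the new helpers: a keyed GRE tunnel with checksums enabled maps GRE_KEY|GRE_CSUM to TUNNEL_KEY|TUNNEL_CSUM, and the encapsulation header is the 4-byte base plus 4 bytes each for the checksum and the key:

	__be16 tflags = gre_flags_to_tnl_flags(GRE_KEY | GRE_CSUM);
	int hlen = ip_gre_calc_hlen(tflags);	/* 4 + 4 + 4 = 12 bytes */
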
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
new file mode 100644
index 00000000000..734d9b5f577
--- /dev/null
+++ b/include/net/gro_cells.h
@@ -0,0 +1,107 @@
+#ifndef _NET_GRO_CELLS_H
+#define _NET_GRO_CELLS_H
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+struct gro_cell {
+ struct sk_buff_head napi_skbs;
+ struct napi_struct napi;
+} ____cacheline_aligned_in_smp;
+
+struct gro_cells {
+ unsigned int gro_cells_mask;
+ struct gro_cell *cells;
+};
+
+static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+{
+ struct gro_cell *cell = gcells->cells;
+ struct net_device *dev = skb->dev;
+
+ if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+ netif_rx(skb);
+ return;
+ }
+
+ if (skb_rx_queue_recorded(skb))
+ cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+
+ if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return;
+ }
+
+ /* We run in BH context */
+ spin_lock(&cell->napi_skbs.lock);
+
+ __skb_queue_tail(&cell->napi_skbs, skb);
+ if (skb_queue_len(&cell->napi_skbs) == 1)
+ napi_schedule(&cell->napi);
+
+ spin_unlock(&cell->napi_skbs.lock);
+}
+
+/* called under BH context */
+static inline int gro_cell_poll(struct napi_struct *napi, int budget)
+{
+ struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
+ struct sk_buff *skb;
+ int work_done = 0;
+
+ spin_lock(&cell->napi_skbs.lock);
+ while (work_done < budget) {
+ skb = __skb_dequeue(&cell->napi_skbs);
+ if (!skb)
+ break;
+ spin_unlock(&cell->napi_skbs.lock);
+ napi_gro_receive(napi, skb);
+ work_done++;
+ spin_lock(&cell->napi_skbs.lock);
+ }
+
+ if (work_done < budget)
+ napi_complete(napi);
+ spin_unlock(&cell->napi_skbs.lock);
+ return work_done;
+}
+
+static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
+{
+ int i;
+
+ gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
+ sizeof(struct gro_cell),
+ GFP_KERNEL);
+ if (!gcells->cells)
+ return -ENOMEM;
+
+ for (i = 0; i <= gcells->gro_cells_mask; i++) {
+ struct gro_cell *cell = gcells->cells + i;
+
+ skb_queue_head_init(&cell->napi_skbs);
+ netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
+ napi_enable(&cell->napi);
+ }
+ return 0;
+}
+
+static inline void gro_cells_destroy(struct gro_cells *gcells)
+{
+ struct gro_cell *cell = gcells->cells;
+ int i;
+
+ if (!cell)
+ return;
+ for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
+ netif_napi_del(&cell->napi);
+ skb_queue_purge(&cell->napi_skbs);
+ }
+ kfree(gcells->cells);
+ gcells->cells = NULL;
+}
+
+#endif
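
A hedged sketch of how a tunnel-style driver would use the gro_cells layer above (the mytun_* names are placeholders; the point is to feed decapsulated packets through per-cell NAPI instead of a direct netif_rx()):

struct mytun_priv {
	struct gro_cells gro_cells;
	/* ... */
};

static int mytun_dev_init(struct net_device *dev)
{
	struct mytun_priv *priv = netdev_priv(dev);

	return gro_cells_init(&priv->gro_cells, dev);
}

static void mytun_dev_uninit(struct net_device *dev)
{
	struct mytun_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
}

static void mytun_rx(struct mytun_priv *priv, struct sk_buff *skb)
{
	/* replaces a direct netif_rx(skb) in the receive path */
	gro_cells_receive(&priv->gro_cells, skb);
}
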
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 6e991e0d0d6..970028e1338 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -25,27 +25,24 @@
struct icmp_err {
int errno;
- unsigned fatal:1;
+ unsigned int fatal:1;
};
extern const struct icmp_err icmp_err_convert[];
#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
struct dst_entry;
struct net_proto_family;
struct sk_buff;
struct net;
-extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
-extern int icmp_rcv(struct sk_buff *skb);
-extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int icmp_init(void);
-extern void icmp_out_count(struct net *net, unsigned char type);
-
-/* Move into dst.h ? */
-extern int xrlim_allow(struct dst_entry *dst, int timeout);
+void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+int icmp_rcv(struct sk_buff *skb);
+void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_init(void);
+void icmp_out_count(struct net *net, unsigned char type);
#endif /* _ICMP_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index af49f8ab7f8..b0fd9476c53 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -178,6 +178,18 @@ struct ieee80211_radiotap_header {
*
* Number of unicast retries a transmitted frame used.
*
+ * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless
+ *
+ * Contains a bitmap of known fields/flags, the flags, and
+ * the MCS index.
+ *
+ * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless
+ *
+ * Contains the AMPDU information for the subframe.
+ *
+ * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16
+ *
+ * Contains VHT information about this frame.
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -199,6 +211,10 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_RTS_RETRIES = 16,
IEEE80211_RADIOTAP_DATA_RETRIES = 17,
+ IEEE80211_RADIOTAP_MCS = 19,
+ IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
+ IEEE80211_RADIOTAP_VHT = 21,
+
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
@@ -214,6 +230,10 @@ enum ieee80211_radiotap_type {
#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */
#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */
#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */
+#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */
+#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */
+#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */
/* For IEEE80211_RADIOTAP_FLAGS */
#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received
@@ -244,14 +264,62 @@ enum ieee80211_radiotap_type {
* retries */
#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */
#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
+#define IEEE80211_RADIOTAP_F_TX_NOACK 0x0008 /* don't expect an ack */
+
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10
+#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03
+#define IEEE80211_RADIOTAP_MCS_BW_20 0
+#define IEEE80211_RADIOTAP_MCS_BW_40 1
+#define IEEE80211_RADIOTAP_MCS_BW_20L 2
+#define IEEE80211_RADIOTAP_MCS_BW_20U 3
+#define IEEE80211_RADIOTAP_MCS_SGI 0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
+#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60
+#define IEEE80211_RADIOTAP_MCS_STBC_1 1
+#define IEEE80211_RADIOTAP_MCS_STBC_2 2
+#define IEEE80211_RADIOTAP_MCS_STBC_3 3
+
+#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5
+
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020
+
+/* For IEEE80211_RADIOTAP_VHT */
+#define IEEE80211_RADIOTAP_VHT_KNOWN_STBC 0x0001
+#define IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA 0x0002
+#define IEEE80211_RADIOTAP_VHT_KNOWN_GI 0x0004
+#define IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS 0x0008
+#define IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM 0x0010
+#define IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED 0x0020
+#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH 0x0040
+#define IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID 0x0080
+#define IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID 0x0100
+
+#define IEEE80211_RADIOTAP_VHT_FLAG_STBC 0x01
+#define IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA 0x02
+#define IEEE80211_RADIOTAP_VHT_FLAG_SGI 0x04
+#define IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 0x08
+#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM 0x10
+#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED 0x20
-/* Ugly macro to convert literal channel numbers into their mhz equivalents
- * There are certianly some conditions that will break this (like feeding it '30')
- * but they shouldn't arise since nothing talks on channel 30. */
-#define ieee80211chan2mhz(x) \
- (((x) <= 14) ? \
- (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
- ((x) + 1000) * 5)
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER1 0x02
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
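
A sketch of decoding the 3-byte MCS field (known, flags, index) with the new defines; locating the field within the frame via the radiotap iterator is assumed and not shown:

static void parse_radiotap_mcs(const u8 *field)
{
	u8 known = field[0], flags = field[1], idx = field[2];
	bool ht40 = false, sgi = false;

	if (known & IEEE80211_RADIOTAP_MCS_HAVE_BW)
		ht40 = (flags & IEEE80211_RADIOTAP_MCS_BW_MASK) ==
		       IEEE80211_RADIOTAP_MCS_BW_40;
	if (known & IEEE80211_RADIOTAP_MCS_HAVE_GI)
		sgi = flags & IEEE80211_RADIOTAP_MCS_SGI;
	if (known & IEEE80211_RADIOTAP_MCS_HAVE_MCS)
		pr_debug("HT MCS %u, %s, %s GI\n", idx,
			 ht40 ? "40 MHz" : "20 MHz", sgi ? "short" : "long");
}
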
diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h
index d52685defb1..0aa7122e8f1 100644
--- a/include/net/ieee802154.h
+++ b/include/net/ieee802154.h
@@ -21,11 +21,14 @@
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
* Maxim Osipov <maxim.osipov@siemens.com>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
#ifndef NET_IEEE802154_H
#define NET_IEEE802154_H
+#define IEEE802154_MTU 127
+
#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
@@ -39,22 +42,54 @@
(((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \
} while (0)
-#define IEEE802154_FC_SECEN (1 << 3)
-#define IEEE802154_FC_FRPEND (1 << 4)
-#define IEEE802154_FC_ACK_REQ (1 << 5)
-#define IEEE802154_FC_INTRA_PAN (1 << 6)
+#define IEEE802154_FC_SECEN_SHIFT 3
+#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT)
+#define IEEE802154_FC_FRPEND_SHIFT 4
+#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT)
+#define IEEE802154_FC_ACK_REQ_SHIFT 5
+#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT)
+#define IEEE802154_FC_INTRA_PAN_SHIFT 6
+#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT)
#define IEEE802154_FC_SAMODE_SHIFT 14
#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT)
#define IEEE802154_FC_DAMODE_SHIFT 10
#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT)
+#define IEEE802154_FC_VERSION_SHIFT 12
+#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT)
+#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT)
+
#define IEEE802154_FC_SAMODE(x) \
(((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT)
#define IEEE802154_FC_DAMODE(x) \
(((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
+#define IEEE802154_SCF_SECLEVEL_MASK 7
+#define IEEE802154_SCF_SECLEVEL_SHIFT 0
+#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK)
+#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3
+#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT)
+#define IEEE802154_SCF_KEY_ID_MODE(x) \
+ ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT)
+
+#define IEEE802154_SCF_KEY_IMPLICIT 0
+#define IEEE802154_SCF_KEY_INDEX 1
+#define IEEE802154_SCF_KEY_SHORT_INDEX 2
+#define IEEE802154_SCF_KEY_HW_INDEX 3
+
+#define IEEE802154_SCF_SECLEVEL_NONE 0
+#define IEEE802154_SCF_SECLEVEL_MIC32 1
+#define IEEE802154_SCF_SECLEVEL_MIC64 2
+#define IEEE802154_SCF_SECLEVEL_MIC128 3
+#define IEEE802154_SCF_SECLEVEL_ENC 4
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7
+
+/* MAC footer size */
+#define IEEE802154_MFR_SIZE 2 /* 2 octets */
/* MAC's Command Frames Identifiers */
#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
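
A small sketch of decomposing the security control field octet with the new SCF helpers (the example value is chosen only for illustration):

static void example_parse_scf(u8 scf)
{
	u8 level = IEEE802154_SCF_SECLEVEL(scf);
	u8 key_mode = IEEE802154_SCF_KEY_ID_MODE(scf);

	/* e.g. scf == 0x0d gives level == IEEE802154_SCF_SECLEVEL_ENC_MIC32 (5)
	 * and key_mode == IEEE802154_SCF_KEY_INDEX (1) */
	pr_debug("seclevel %u, key id mode %u\n", level, key_mode);
}
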
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 57430555487..3b53c8e405e 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -1,7 +1,7 @@
/*
* An interface between IEEE802.15.4 device and rest of the kernel.
*
- * Copyright (C) 2007, 2008, 2009 Siemens AG
+ * Copyright (C) 2007-2012 Siemens AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -21,60 +21,378 @@
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
* Maxim Osipov <maxim.osipov@siemens.com>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
-/*
- * A control block of skb passed between the ARPHRD_IEEE802154 device
- * and other stack parts.
- */
-struct ieee802154_mac_cb {
- u8 lqi;
- struct ieee802154_addr sa;
- struct ieee802154_addr da;
- u8 flags;
+#include <net/ieee802154.h>
+#include <net/af_ieee802154.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+struct ieee802154_sechdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 level:3,
+ key_id_mode:2,
+ reserved:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:3,
+ key_id_mode:2,
+ level:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 key_id;
+ __le32 frame_counter;
+ union {
+ __le32 short_src;
+ __le64 extended_src;
+ };
+};
+
+struct ieee802154_addr {
+ u8 mode;
+ __le16 pan_id;
+ union {
+ __le16 short_addr;
+ __le64 extended_addr;
+ };
+};
+
+struct ieee802154_hdr_fc {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:3,
+ security_enabled:1,
+ frame_pending:1,
+ ack_request:1,
+ intra_pan:1,
+ reserved:3,
+ dest_addr_mode:2,
+ version:2,
+ source_addr_mode:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 reserved:1,
+ intra_pan:1,
+ ack_request:1,
+ frame_pending:1,
+ security_enabled:1,
+ type:3,
+ source_addr_mode:2,
+ version:2,
+ dest_addr_mode:2,
+ reserved2:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+struct ieee802154_hdr {
+ struct ieee802154_hdr_fc fc;
u8 seq;
+ struct ieee802154_addr source;
+ struct ieee802154_addr dest;
+ struct ieee802154_sechdr sec;
};
-static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
+/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
+ * the contents of hdr will be, and the actual value of those bits in
+ * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
+ * version, if SECEN is set.
+ */
+int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr);
+
+/* pulls the entire 802.15.4 header off of the skb, including the security
+ * header, and performs pan id decompression
+ */
+int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+/* parses the frame control, sequence number and address fields in a given skb
+ * and stores them into hdr, performing pan id decompression and length checks
+ * to be suitable for use in header_ops.parse
+ */
+int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
+ struct ieee802154_hdr *hdr);
+
+/* parses the full 802.15.4 header of a given skb and stores it into hdr,
+ * performing pan id decompression and length checks to be suitable for use in
+ * header_ops.parse
+ */
+int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
+
+static inline int
+ieee802154_sechdr_authtag_len(const struct ieee802154_sechdr *sec)
{
- return (struct ieee802154_mac_cb *)skb->cb;
+ switch (sec->level) {
+ case IEEE802154_SCF_SECLEVEL_MIC32:
+ case IEEE802154_SCF_SECLEVEL_ENC_MIC32:
+ return 4;
+ case IEEE802154_SCF_SECLEVEL_MIC64:
+ case IEEE802154_SCF_SECLEVEL_ENC_MIC64:
+ return 8;
+ case IEEE802154_SCF_SECLEVEL_MIC128:
+ case IEEE802154_SCF_SECLEVEL_ENC_MIC128:
+ return 16;
+ case IEEE802154_SCF_SECLEVEL_NONE:
+ case IEEE802154_SCF_SECLEVEL_ENC:
+ default:
+ return 0;
+ }
}
-#define MAC_CB_FLAG_TYPEMASK ((1 << 3) - 1)
+static inline int ieee802154_hdr_length(struct sk_buff *skb)
+{
+ struct ieee802154_hdr hdr;
+ int len = ieee802154_hdr_pull(skb, &hdr);
-#define MAC_CB_FLAG_ACKREQ (1 << 3)
-#define MAC_CB_FLAG_SECEN (1 << 4)
-#define MAC_CB_FLAG_INTRAPAN (1 << 5)
+ if (len > 0)
+ skb_push(skb, len);
-static inline int mac_cb_is_ackreq(struct sk_buff *skb)
+ return len;
+}
+
+static inline bool ieee802154_addr_equal(const struct ieee802154_addr *a1,
+ const struct ieee802154_addr *a2)
{
- return mac_cb(skb)->flags & MAC_CB_FLAG_ACKREQ;
+ if (a1->pan_id != a2->pan_id || a1->mode != a2->mode)
+ return false;
+
+ if ((a1->mode == IEEE802154_ADDR_LONG &&
+ a1->extended_addr != a2->extended_addr) ||
+ (a1->mode == IEEE802154_ADDR_SHORT &&
+ a1->short_addr != a2->short_addr))
+ return false;
+
+ return true;
+}
+
+static inline __le64 ieee802154_devaddr_from_raw(const void *raw)
+{
+ u64 temp;
+
+ memcpy(&temp, raw, IEEE802154_ADDR_LEN);
+ return (__force __le64)swab64(temp);
}
-static inline int mac_cb_is_secen(struct sk_buff *skb)
+static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
{
- return mac_cb(skb)->flags & MAC_CB_FLAG_SECEN;
+ u64 temp = swab64((__force u64)addr);
+
+ memcpy(raw, &temp, IEEE802154_ADDR_LEN);
}
-static inline int mac_cb_is_intrapan(struct sk_buff *skb)
+static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
+ const struct ieee802154_addr_sa *sa)
{
- return mac_cb(skb)->flags & MAC_CB_FLAG_INTRAPAN;
+ a->mode = sa->addr_type;
+ a->pan_id = cpu_to_le16(sa->pan_id);
+
+ switch (a->mode) {
+ case IEEE802154_ADDR_SHORT:
+ a->short_addr = cpu_to_le16(sa->short_addr);
+ break;
+ case IEEE802154_ADDR_LONG:
+ a->extended_addr = ieee802154_devaddr_from_raw(sa->hwaddr);
+ break;
+ }
}
-static inline int mac_cb_type(struct sk_buff *skb)
+static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa,
+ const struct ieee802154_addr *a)
{
- return mac_cb(skb)->flags & MAC_CB_FLAG_TYPEMASK;
+ sa->addr_type = a->mode;
+ sa->pan_id = le16_to_cpu(a->pan_id);
+
+ switch (a->mode) {
+ case IEEE802154_ADDR_SHORT:
+ sa->short_addr = le16_to_cpu(a->short_addr);
+ break;
+ case IEEE802154_ADDR_LONG:
+ ieee802154_devaddr_to_raw(sa->hwaddr, a->extended_addr);
+ break;
+ }
+}
+
+/*
+ * A control block of skb passed between the ARPHRD_IEEE802154 device
+ * and other stack parts.
+ */
+struct ieee802154_mac_cb {
+ u8 lqi;
+ u8 type;
+ bool ackreq;
+ bool secen;
+ bool secen_override;
+ u8 seclevel;
+ bool seclevel_override;
+ struct ieee802154_addr source;
+ struct ieee802154_addr dest;
+};
+
+static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
+{
+ return (struct ieee802154_mac_cb *)skb->cb;
+}
+
+static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
+
+ memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
+ return mac_cb(skb);
}
+#define IEEE802154_LLSEC_KEY_SIZE 16
+
+struct ieee802154_llsec_key_id {
+ u8 mode;
+ u8 id;
+ union {
+ struct ieee802154_addr device_addr;
+ __le32 short_source;
+ __le64 extended_source;
+ };
+};
+
+struct ieee802154_llsec_key {
+ u8 frame_types;
+ u32 cmd_frame_ids;
+ u8 key[IEEE802154_LLSEC_KEY_SIZE];
+};
+
+struct ieee802154_llsec_key_entry {
+ struct list_head list;
+
+ struct ieee802154_llsec_key_id id;
+ struct ieee802154_llsec_key *key;
+};
+
+struct ieee802154_llsec_device_key {
+ struct list_head list;
+
+ struct ieee802154_llsec_key_id key_id;
+ u32 frame_counter;
+};
+
+enum {
+ IEEE802154_LLSEC_DEVKEY_IGNORE,
+ IEEE802154_LLSEC_DEVKEY_RESTRICT,
+ IEEE802154_LLSEC_DEVKEY_RECORD,
+
+ __IEEE802154_LLSEC_DEVKEY_MAX,
+};
+
+struct ieee802154_llsec_device {
+ struct list_head list;
+
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 hwaddr;
+ u32 frame_counter;
+ bool seclevel_exempt;
+
+ u8 key_mode;
+ struct list_head keys;
+};
+
+struct ieee802154_llsec_seclevel {
+ struct list_head list;
+
+ u8 frame_type;
+ u8 cmd_frame_id;
+ bool device_override;
+ u32 sec_levels;
+};
+
+struct ieee802154_llsec_params {
+ bool enabled;
+
+ __be32 frame_counter;
+ u8 out_level;
+ struct ieee802154_llsec_key_id out_key;
+
+ __le64 default_key_source;
+
+ __le16 pan_id;
+ __le64 hwaddr;
+ __le64 coord_hwaddr;
+ __le16 coord_shortaddr;
+};
+
+struct ieee802154_llsec_table {
+ struct list_head keys;
+ struct list_head devices;
+ struct list_head security_levels;
+};
+
#define IEEE802154_MAC_SCAN_ED 0
#define IEEE802154_MAC_SCAN_ACTIVE 1
#define IEEE802154_MAC_SCAN_PASSIVE 2
#define IEEE802154_MAC_SCAN_ORPHAN 3
+struct ieee802154_mac_params {
+ s8 transmit_power;
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+ u8 cca_mode;
+ s32 cca_ed_level;
+};
+
struct wpan_phy;
+
+enum {
+ IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
+ IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
+ IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
+ IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
+ IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
+ IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
+ IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
+ IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
+ IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
+};
+
+struct ieee802154_llsec_ops {
+ int (*get_params)(struct net_device *dev,
+ struct ieee802154_llsec_params *params);
+ int (*set_params)(struct net_device *dev,
+ const struct ieee802154_llsec_params *params,
+ int changed);
+
+ int (*add_key)(struct net_device *dev,
+ const struct ieee802154_llsec_key_id *id,
+ const struct ieee802154_llsec_key *key);
+ int (*del_key)(struct net_device *dev,
+ const struct ieee802154_llsec_key_id *id);
+
+ int (*add_dev)(struct net_device *dev,
+ const struct ieee802154_llsec_device *llsec_dev);
+ int (*del_dev)(struct net_device *dev, __le64 dev_addr);
+
+ int (*add_devkey)(struct net_device *dev,
+ __le64 device_addr,
+ const struct ieee802154_llsec_device_key *key);
+ int (*del_devkey)(struct net_device *dev,
+ __le64 device_addr,
+ const struct ieee802154_llsec_device_key *key);
+
+ int (*add_seclevel)(struct net_device *dev,
+ const struct ieee802154_llsec_seclevel *sl);
+ int (*del_seclevel)(struct net_device *dev,
+ const struct ieee802154_llsec_seclevel *sl);
+
+ void (*lock_table)(struct net_device *dev);
+ void (*get_table)(struct net_device *dev,
+ struct ieee802154_llsec_table **t);
+ void (*unlock_table)(struct net_device *dev);
+};
/*
* This should be located at net_device->ml_priv
*
@@ -82,12 +400,14 @@ struct wpan_phy;
* Use wpan_wpy_put to put that reference.
*/
struct ieee802154_mlme_ops {
+ /* The following fields are optional (can be NULL). */
+
int (*assoc_req)(struct net_device *dev,
struct ieee802154_addr *addr,
u8 channel, u8 page, u8 cap);
int (*assoc_resp)(struct net_device *dev,
struct ieee802154_addr *addr,
- u16 short_addr, u8 status);
+ __le16 short_addr, u8 status);
int (*disassoc_req)(struct net_device *dev,
struct ieee802154_addr *addr,
u8 reason);
@@ -98,24 +418,46 @@ struct ieee802154_mlme_ops {
int (*scan_req)(struct net_device *dev,
u8 type, u32 channels, u8 page, u8 duration);
+ int (*set_mac_params)(struct net_device *dev,
+ const struct ieee802154_mac_params *params);
+ void (*get_mac_params)(struct net_device *dev,
+ struct ieee802154_mac_params *params);
+
+ struct ieee802154_llsec_ops *llsec;
+
+ /* The fields below are required. */
+
struct wpan_phy *(*get_phy)(const struct net_device *dev);
/*
* FIXME: these should become the part of PIB/MIB interface.
* However we still don't have IB interface of any kind
*/
- u16 (*get_pan_id)(const struct net_device *dev);
- u16 (*get_short_addr)(const struct net_device *dev);
+ __le16 (*get_pan_id)(const struct net_device *dev);
+ __le16 (*get_short_addr)(const struct net_device *dev);
u8 (*get_dsn)(const struct net_device *dev);
- u8 (*get_bsn)(const struct net_device *dev);
};
-static inline struct ieee802154_mlme_ops *ieee802154_mlme_ops(
- const struct net_device *dev)
+/* The IEEE 802.15.4 standard defines two types of devices:
+ * - FFD - full functionality device
+ * - RFD - reduced functionality device
+ *
+ * So two sets of mlme operations are needed
+ */
+struct ieee802154_reduced_mlme_ops {
+ struct wpan_phy *(*get_phy)(const struct net_device *dev);
+};
+
+static inline struct ieee802154_mlme_ops *
+ieee802154_mlme_ops(const struct net_device *dev)
{
return dev->ml_priv;
}
-#endif
-
+static inline struct ieee802154_reduced_mlme_ops *
+ieee802154_reduced_mlme_ops(const struct net_device *dev)
+{
+ return dev->ml_priv;
+}
+#endif
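
A sketch of a receive path using the reworked control block and header helpers above (not taken from this patch; error handling trimmed):

static int example_rx(struct sk_buff *skb, u8 lqi)
{
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct ieee802154_hdr hdr;

	if (ieee802154_hdr_pull(skb, &hdr) < 0)
		return -EINVAL;

	cb->lqi = lqi;
	cb->type = hdr.fc.type;
	cb->ackreq = hdr.fc.ack_request;
	cb->source = hdr.source;
	cb->dest = hdr.dest;

	return netif_rx(skb);
}
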
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 04977eefb0e..b4956a5fcc3 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -30,11 +30,11 @@
#define IF_PREFIX_ONLINK 0x01
#define IF_PREFIX_AUTOCONF 0x02
-#ifdef __KERNEL__
-
enum {
+ INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
+ INET6_IFADDR_STATE_ERRDAD,
INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
@@ -43,6 +43,7 @@ struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
+ /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
__u32 prefered_lft;
atomic_t refcnt;
@@ -51,15 +52,15 @@ struct inet6_ifaddr {
int state;
- __u8 probes;
- __u8 flags;
+ __u32 flags;
+ __u8 dad_probes;
__u16 scope;
unsigned long cstamp; /* created timestamp */
unsigned long tstamp; /* updated timestamp */
- struct timer_list timer;
+ struct delayed_work dad_work;
struct inet6_dev *idev;
struct rt6_info *rt;
@@ -67,12 +68,14 @@ struct inet6_ifaddr {
struct hlist_node addr_lst;
struct list_head if_list;
-#ifdef CONFIG_IPV6_PRIVACY
struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
int regen_count;
-#endif
+
+ bool tokenized;
+
struct rcu_head rcu;
+ struct in6_addr peer_addr;
};
struct ip6_sf_socklist {
@@ -121,7 +124,7 @@ struct ifmcaddr6 {
unsigned char mca_crcount;
unsigned long mca_sfcount[2];
struct timer_list mca_timer;
- unsigned mca_flags;
+ unsigned int mca_flags;
int mca_users;
atomic_t mca_refcnt;
spinlock_t mca_lock;
@@ -156,8 +159,8 @@ struct ifacaddr6 {
struct ipv6_devstat {
struct proc_dir_entry *proc_dir_entry;
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6);
- DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6);
- DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6_mib_device, icmpv6dev);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib_device, icmpv6msgdev);
};
struct inet6_dev {
@@ -168,13 +171,20 @@ struct inet6_dev {
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
spinlock_t mc_lock;
- unsigned char mc_qrv;
+
+ unsigned char mc_qrv; /* Query Robustness Variable */
unsigned char mc_gq_running;
unsigned char mc_ifc_count;
- unsigned long mc_v1_seen;
+ unsigned char mc_dad_count;
+
+ unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */
+ unsigned long mc_qi; /* Query Interval */
+ unsigned long mc_qri; /* Query Response Interval */
unsigned long mc_maxdelay;
+
struct timer_list mc_gq_timer; /* general query timer */
struct timer_list mc_ifc_timer; /* interface change timer */
+ struct timer_list mc_dad_timer; /* dad complete mc timer */
struct ifacaddr6 *ac_list;
rwlock_t lock;
@@ -182,21 +192,24 @@ struct inet6_dev {
__u32 if_flags;
int dead;
-#ifdef CONFIG_IPV6_PRIVACY
u8 rndid[8];
struct timer_list regen_timer;
struct list_head tempaddr_list;
-#endif
+
+ struct in6_addr token;
struct neigh_parms *nd_parms;
- struct inet6_dev *next;
struct ipv6_devconf cnf;
struct ipv6_devstat stats;
+
+ struct timer_list rs_timer;
+ __u8 rs_probes;
+
unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
struct rcu_head rcu;
};
-static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf)
+static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf)
{
/*
* +-------+-------+-------+-------+-------+-------+
@@ -210,60 +223,6 @@ static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf)
memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32));
}
-static inline void ipv6_tr_mc_map(struct in6_addr *addr, char *buf)
-{
- /* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */
-
- if (((addr->s6_addr[0] == 0xFF) &&
- ((addr->s6_addr[1] == 0x01) || (addr->s6_addr[1] == 0x02)) &&
- (addr->s6_addr16[1] == 0) &&
- (addr->s6_addr32[1] == 0) &&
- (addr->s6_addr32[2] == 0) &&
- (addr->s6_addr16[6] == 0) &&
- (addr->s6_addr[15] == 1)) ||
- ((addr->s6_addr[0] == 0xFF) &&
- (addr->s6_addr[1] == 0x02) &&
- (addr->s6_addr16[1] == 0) &&
- (addr->s6_addr32[1] == 0) &&
- (addr->s6_addr16[4] == 0) &&
- (addr->s6_addr[10] == 0) &&
- (addr->s6_addr[11] == 1) &&
- (addr->s6_addr[12] == 0xff)))
- {
- buf[0]=0xC0;
- buf[1]=0x00;
- buf[2]=0x01;
- buf[3]=0x00;
- buf[4]=0x00;
- buf[5]=0x00;
- /* All routers FF0x::2 */
- } else if ((addr->s6_addr[0] ==0xff) &&
- ((addr->s6_addr[1] & 0xF0) == 0) &&
- (addr->s6_addr16[1] == 0) &&
- (addr->s6_addr32[1] == 0) &&
- (addr->s6_addr32[2] == 0) &&
- (addr->s6_addr16[6] == 0) &&
- (addr->s6_addr[15] == 2))
- {
- buf[0]=0xC0;
- buf[1]=0x00;
- buf[2]=0x02;
- buf[3]=0x00;
- buf[4]=0x00;
- buf[5]=0x00;
- } else {
- unsigned char i ;
-
- i = addr->s6_addr[15] & 7 ;
- buf[0]=0xC0;
- buf[1]=0x00;
- buf[2]=0x00;
- buf[3]=0x01 << i ;
- buf[4]=0x00;
- buf[5]=0x00;
- }
-}
-
static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf)
{
buf[0] = 0x00;
@@ -286,5 +245,20 @@ static inline void ipv6_ib_mc_map(const struct in6_addr *addr,
buf[9] = broadcast[9];
memcpy(buf + 10, addr->s6_addr + 6, 10);
}
-#endif
+
+static inline int ipv6_ipgre_mc_map(const struct in6_addr *addr,
+ const unsigned char *broadcast, char *buf)
+{
+ if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) {
+ memcpy(buf, broadcast, 4);
+ } else {
+ /* v4mapped? */
+ if ((addr->s6_addr32[0] | addr->s6_addr32[1] |
+ (addr->s6_addr32[2] ^ htonl(0x0000ffff))) != 0)
+ return -EINVAL;
+ memcpy(buf, &addr->s6_addr32[3], 4);
+ }
+ return 0;
+}
+
#endif
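
A worked example of the Ethernet mapping above: the low 32 bits of the IPv6 address are appended to the fixed 33:33 prefix, so the all-nodes address ff02::1 maps to 33:33:00:00:00:01:

static void example_mc_map(void)
{
	struct in6_addr all_nodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
	char mac[ETH_ALEN];

	ipv6_eth_mc_map(&all_nodes, mac);	/* mac = 33:33:00:00:00:01 */
}
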
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index ff013505236..74af137304b 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -22,24 +22,25 @@ struct sk_buff;
struct sock;
struct sockaddr;
-extern int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb);
+int inet6_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb, bool relax);
-extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
- const struct request_sock *req);
+struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+ const struct request_sock *req);
-extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
- const __be16 rport,
- const struct in6_addr *raddr,
- const struct in6_addr *laddr,
- const int iif);
+struct request_sock *inet6_csk_search_req(const struct sock *sk,
+ struct request_sock ***prevp,
+ const __be16 rport,
+ const struct in6_addr *raddr,
+ const struct in6_addr *laddr,
+ const int iif);
-extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
- struct request_sock *req,
- const unsigned long timeout);
+void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ const unsigned long timeout);
-extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-extern int inet6_csk_xmit(struct sk_buff *skb);
+int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index e46674d5dae..ae061354430 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -15,7 +15,7 @@
#define _INET6_HASHTABLES_H
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/types.h>
@@ -28,32 +28,17 @@
struct inet_hashinfo;
-/* I have no idea if this is a good hash for v6 or not. -DaveM */
-static inline unsigned int inet6_ehashfn(struct net *net,
- const struct in6_addr *laddr, const u16 lport,
- const struct in6_addr *faddr, const __be16 fport)
+static inline unsigned int __inet6_ehashfn(const u32 lhash,
+ const u16 lport,
+ const u32 fhash,
+ const __be16 fport,
+ const u32 initval)
{
- u32 ports = (lport ^ (__force u16)fport);
-
- return jhash_3words((__force u32)laddr->s6_addr32[3],
- (__force u32)faddr->s6_addr32[3],
- ports, inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet6_sk_ehashfn(const struct sock *sk)
-{
- const struct inet_sock *inet = inet_sk(sk);
- const struct ipv6_pinfo *np = inet6_sk(sk);
- const struct in6_addr *laddr = &np->rcv_saddr;
- const struct in6_addr *faddr = &np->daddr;
- const __u16 lport = inet->inet_num;
- const __be16 fport = inet->inet_dport;
- struct net *net = sock_net(sk);
-
- return inet6_ehashfn(net, laddr, lport, faddr, fport);
+ const u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+ return jhash_3words(lhash, fhash, ports, initval);
}
-extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
@@ -61,19 +46,19 @@ extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
*
* The sockhash lock must be held as a reader here.
*/
-extern struct sock *__inet6_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct in6_addr *saddr,
- const __be16 sport,
- const struct in6_addr *daddr,
- const u16 hnum,
- const int dif);
-
-extern struct sock *inet6_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct in6_addr *daddr,
- const unsigned short hnum,
- const int dif);
+struct sock *__inet6_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum, const int dif);
+
+struct sock *inet6_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const unsigned short hnum, const int dif);
static inline struct sock *__inet6_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -88,7 +73,8 @@ static inline struct sock *__inet6_lookup(struct net *net,
if (sk)
return sk;
- return inet6_lookup_listener(net, hashinfo, daddr, hnum, dif);
+ return inet6_lookup_listener(net, hashinfo, saddr, sport,
+ daddr, hnum, dif);
}
static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
@@ -96,19 +82,20 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
const __be16 sport,
const __be16 dport)
{
- struct sock *sk;
+ struct sock *sk = skb_steal_sock(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
+ if (sk)
return sk;
- else return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
- &ipv6_hdr(skb)->saddr, sport,
- &ipv6_hdr(skb)->daddr, ntohs(dport),
- inet6_iif(skb));
+
+ return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
+ &ipv6_hdr(skb)->saddr, sport,
+ &ipv6_hdr(skb)->daddr, ntohs(dport),
+ inet6_iif(skb));
}
-extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
- const struct in6_addr *saddr, const __be16 sport,
- const struct in6_addr *daddr, const __be16 dport,
- const int dif);
-#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
+struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr, const __be16 sport,
+ const struct in6_addr *daddr, const __be16 dport,
+ const int dif);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 22fac9892b1..fe7994c48b7 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -13,28 +13,30 @@ struct sock;
struct sockaddr;
struct socket;
-extern int inet_release(struct socket *sock);
-extern int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
- int addr_len, int flags);
-extern int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
- int addr_len, int flags);
-extern int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size);
-extern ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-extern int inet_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags);
-extern int inet_shutdown(struct socket *sock, int how);
-extern int inet_listen(struct socket *sock, int backlog);
-extern void inet_sock_destruct(struct sock *sk);
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-extern int inet_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer);
-extern int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-extern int inet_ctl_sock_create(struct sock **sk, unsigned short family,
- unsigned short type, unsigned char protocol,
- struct net *net);
+int inet_release(struct socket *sock);
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ size_t size);
+ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ size_t size, int flags);
+int inet_shutdown(struct socket *sock, int how);
+int inet_listen(struct socket *sock, int backlog);
+void inet_sock_destruct(struct sock *sk);
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+ int peer);
+int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+ unsigned short type, unsigned char protocol,
+ struct net *net);
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 6c93a56cc95..7a431388756 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -36,15 +36,16 @@ struct tcp_congestion_ops;
* (i.e. things that depend on the address family)
*/
struct inet_connection_sock_af_ops {
- int (*queue_xmit)(struct sk_buff *skb);
+ int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
+ void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
- struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
u16 net_header_len;
+ u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
@@ -60,7 +61,7 @@ struct inet_connection_sock_af_ops {
#endif
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb);
+ const struct inet_bind_bucket *tb, bool relax);
};
/** inet_connection_sock - INET connection oriented sock
@@ -132,7 +133,8 @@ struct inet_connection_sock {
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
#define ICSK_TIME_DACK 2 /* Delayed ack timer */
#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
-#define ICSK_TIME_KEEPOPEN 4 /* Keepalive timer */
+#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */
+#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
@@ -144,9 +146,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
return (void *)inet_csk(sk)->icsk_ca_priv;
}
-extern struct sock *inet_csk_clone(struct sock *sk,
- const struct request_sock *req,
- const gfp_t priority);
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority);
enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
@@ -155,11 +157,11 @@ enum inet_csk_ack_state_t {
ICSK_ACK_PUSHED2 = 8
};
-extern void inet_csk_init_xmit_timers(struct sock *sk,
- void (*retransmit_handler)(unsigned long),
- void (*delack_handler)(unsigned long),
- void (*keepalive_handler)(unsigned long));
-extern void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_init_xmit_timers(struct sock *sk,
+ void (*retransmit_handler)(unsigned long),
+ void (*delack_handler)(unsigned long),
+ void (*keepalive_handler)(unsigned long));
+void inet_csk_clear_xmit_timers(struct sock *sk);
static inline void inet_csk_schedule_ack(struct sock *sk)
{
@@ -176,8 +178,8 @@ static inline void inet_csk_delack_init(struct sock *sk)
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
#ifdef INET_CSK_DEBUG
extern const char inet_csk_timer_bug_msg[];
@@ -222,7 +224,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
when = max_when;
}
- if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+ if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
+ what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) {
icsk->icsk_pending = what;
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
@@ -238,18 +241,20 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
#endif
}
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
-extern struct request_sock *inet_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
- const __be16 rport,
- const __be32 raddr,
- const __be32 laddr);
-extern int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb);
-extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+ struct request_sock ***prevp,
+ const __be16 rport,
+ const __be32 raddr,
+ const __be32 laddr);
+int inet_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb, bool relax);
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
-extern struct dst_entry* inet_csk_route_req(struct sock *sk,
+struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+ const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
const struct request_sock *req);
static inline void inet_csk_reqsk_queue_add(struct sock *sk,
@@ -259,9 +264,8 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
}
-extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
- struct request_sock *req,
- unsigned long timeout);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ unsigned long timeout);
static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
struct request_sock *req)
@@ -308,12 +312,13 @@ static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
reqsk_free(req);
}
-extern void inet_csk_reqsk_queue_prune(struct sock *parent,
- const unsigned long interval,
- const unsigned long timeout,
- const unsigned long max_rto);
+void inet_csk_reqsk_queue_prune(struct sock *parent,
+ const unsigned long interval,
+ const unsigned long timeout,
+ const unsigned long max_rto);
-extern void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll..
@@ -324,13 +329,15 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
(POLLIN | POLLRDNORM) : 0;
}
-extern int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
-extern void inet_csk_listen_stop(struct sock *sk);
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+void inet_csk_listen_stop(struct sock *sk);
+
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
-extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET_CONNECTION_SOCK_H */
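
A hedged sketch, not part of the patch above: the two new timer constants share icsk_retransmit_timer with ICSK_TIME_RETRANS and ICSK_TIME_PROBE0, as the inet_csk_reset_xmit_timer() hunk shows. Below is one plausible way a caller such as TCP might arm the tail loss probe timer; the function name, the timeout value and the TCP_RTO_MAX clamp are assumptions, only inet_csk_reset_xmit_timer() and ICSK_TIME_LOSS_PROBE come from this header.

static void example_arm_tail_loss_probe(struct sock *sk, unsigned long timeout)
{
	/* ICSK_TIME_LOSS_PROBE reuses icsk_retransmit_timer, so arming it
	 * replaces any pending ICSK_TIME_RETRANS/ICSK_TIME_PROBE0 timer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE,
				  timeout, TCP_RTO_MAX);
}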
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 88bdd010d65..84b20835b73 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -15,6 +15,8 @@ enum {
INET_ECN_MASK = 3,
};
+extern int sysctl_tunnel_ecn_log;
+
static inline int INET_ECN_is_ce(__u8 dsfield)
{
return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
@@ -30,6 +32,14 @@ static inline int INET_ECN_is_capable(__u8 dsfield)
return dsfield & INET_ECN_ECT_0;
}
+/*
+ * RFC 3168 9.1.1
+ * The full-functionality option for ECN encapsulation is to copy the
+ * ECN codepoint of the inside header to the outside header on
+ * encapsulation if the inside header is not-ECT or ECT, and to set the
+ * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
+ * the inside header is CE.
+ */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
outer &= ~INET_ECN_MASK;
@@ -38,9 +48,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
return outer;
}
-#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define INET_ECN_dontxmit(sk) \
- do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+ inet_sk(sk)->tos |= INET_ECN_ECT_0;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+ inet_sk(sk)->tos &= ~INET_ECN_MASK;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
#define IP6_ECN_flow_init(label) do { \
(label) &= ~htonl(INET_ECN_MASK << 20); \
@@ -114,12 +134,14 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
switch (skb->protocol) {
case cpu_to_be16(ETH_P_IP):
- if (skb->network_header + sizeof(struct iphdr) <= skb->tail)
+ if (skb_network_header(skb) + sizeof(struct iphdr) <=
+ skb_tail_pointer(skb))
return IP_ECN_set_ce(ip_hdr(skb));
break;
case cpu_to_be16(ETH_P_IPV6):
- if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail)
+ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ skb_tail_pointer(skb))
return IP6_ECN_set_ce(ipv6_hdr(skb));
break;
}
@@ -127,4 +149,78 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0;
}
+/*
+ * RFC 6040 4.2
+ * To decapsulate the inner header at the tunnel egress, a compliant
+ * tunnel egress MUST set the outgoing ECN field to the codepoint at the
+ * intersection of the appropriate arriving inner header (row) and outer
+ * header (column) in Figure 4
+ *
+ * +---------+------------------------------------------------+
+ * |Arriving | Arriving Outer Header |
+ * | Inner +---------+------------+------------+------------+
+ * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
+ * +---------+---------+------------+------------+------------+
+ * | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
+ * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE |
+ * | ECT(1) | ECT(1) | ECT(1) (!) | ECT(1) | CE |
+ * | CE | CE | CE | CE(!!!)| CE |
+ * +---------+---------+------------+------------+------------+
+ *
+ * Figure 4: New IP in IP Decapsulation Behaviour
+ *
+ * returns 0 on success
+ * 1 if something is broken and should be logged (!!! above)
+ * 2 if packet should be dropped
+ */
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+ __u8 outer, __u8 inner)
+{
+ if (INET_ECN_is_not_ect(inner)) {
+ switch (outer & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ return 0;
+ case INET_ECN_ECT_0:
+ case INET_ECN_ECT_1:
+ return 1;
+ case INET_ECN_CE:
+ return 2;
+ }
+ }
+
+ if (INET_ECN_is_ce(outer))
+ INET_ECN_set_ce(skb);
+
+ return 0;
+}
+
+static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
+ struct sk_buff *skb)
+{
+ __u8 inner;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ inner = ip_hdr(skb)->tos;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner = ipv6_get_dsfield(ipv6_hdr(skb));
+ else
+ return 0;
+
+ return INET_ECN_decapsulate(skb, oiph->tos, inner);
+}
+
+static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
+ struct sk_buff *skb)
+{
+ __u8 inner;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ inner = ip_hdr(skb)->tos;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner = ipv6_get_dsfield(ipv6_hdr(skb));
+ else
+ return 0;
+
+ return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
+}
#endif
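
A hedged usage sketch, not part of the patch above: per the RFC 6040 table, a tunnel egress is expected to drop on a return value of 2 and may log on 1, gated by the sysctl_tunnel_ecn_log knob declared earlier in this header. The helper name and the rate-limited message below are illustrative; IP_ECN_decapsulate() and its 0/1/2 convention are taken from the hunk above.

static bool example_tunnel_ecn_decap(const struct iphdr *outer_iph,
				     struct sk_buff *skb)
{
	int err = IP_ECN_decapsulate(outer_iph, skb);

	if (unlikely(err)) {
		if (err > 1)
			return false;	/* CE outer on a not-ECT inner: drop */
		if (sysctl_tunnel_ecn_log)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &outer_iph->saddr, outer_iph->tos);
	}
	return true;			/* deliver the inner packet */
}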
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 16ff29a7bb3..6f59de98dab 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,10 +1,17 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
+#include <linux/percpu_counter.h>
+
struct netns_frags {
int nqueues;
- atomic_t mem;
struct list_head lru_list;
+ spinlock_t lru_lock;
+
+ /* The percpu_counter "mem" needs to be cacheline aligned.
+ * mem.count must not share a cacheline with other writers
+ */
+ struct percpu_counter mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
@@ -13,12 +20,11 @@ struct netns_frags {
};
struct inet_frag_queue {
- struct hlist_node list;
- struct netns_frags *net;
- struct list_head lru_list; /* lru list member */
spinlock_t lock;
- atomic_t refcnt;
struct timer_list timer; /* when will this queue expire? */
+ struct list_head lru_list; /* lru list member */
+ struct hlist_node list;
+ atomic_t refcnt;
struct sk_buff *fragments; /* list of received fragments */
struct sk_buff *fragments_tail;
ktime_t stamp;
@@ -29,25 +35,48 @@ struct inet_frag_queue {
#define INET_FRAG_COMPLETE 4
#define INET_FRAG_FIRST_IN 2
#define INET_FRAG_LAST_IN 1
+
+ u16 max_size;
+
+ struct netns_frags *net;
};
-#define INETFRAGS_HASHSZ 64
+#define INETFRAGS_HASHSZ 1024
+
+/* averaged:
+ * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
+ * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
+ * struct frag_queue))
+ */
+#define INETFRAGS_MAXDEPTH 128
+
+struct inet_frag_bucket {
+ struct hlist_head chain;
+ spinlock_t chain_lock;
+};
struct inet_frags {
- struct hlist_head hash[INETFRAGS_HASHSZ];
- rwlock_t lock;
- u32 rnd;
- int qsize;
+ struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
+ /* This rwlock is a global lock (separate per IPv4, IPv6 and
+ * netfilter). Important to keep this on a separate cacheline.
+ * It's primarily a rebuild protection rwlock.
+ */
+ rwlock_t lock ____cacheline_aligned_in_smp;
int secret_interval;
struct timer_list secret_timer;
+ /* The first call to hashfn is responsible for initializing
+ * rnd. This is best done with net_get_random_once.
+ */
+ u32 rnd;
+ int qsize;
+
unsigned int (*hashfn)(struct inet_frag_queue *);
+ bool (*match)(struct inet_frag_queue *q, void *arg);
void (*constructor)(struct inet_frag_queue *q,
void *arg);
void (*destructor)(struct inet_frag_queue *);
void (*skb_free)(struct sk_buff *);
- int (*match)(struct inet_frag_queue *q,
- void *arg);
void (*frag_expire)(unsigned long data);
};
@@ -60,10 +89,12 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
struct inet_frags *f, int *work);
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
struct inet_frags *f, void *key, unsigned int hash)
__releases(&f->lock);
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+ const char *prefix);
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
@@ -71,4 +102,80 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
inet_frag_destroy(q, f, NULL);
}
+/* Memory Tracking Functions. */
+
+/* The default percpu_counter batch size is not big enough to scale to
+ * fragmentation mem acct sizes.
+ * The mem size of a 64K fragment is approx:
+ * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
+ */
+static unsigned int frag_percpu_counter_batch = 130000;
+
+static inline int frag_mem_limit(struct netns_frags *nf)
+{
+ return percpu_counter_read(&nf->mem);
+}
+
+static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
+{
+ __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
+}
+
+static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
+{
+ __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
+}
+
+static inline void init_frag_mem_limit(struct netns_frags *nf)
+{
+ percpu_counter_init(&nf->mem, 0);
+}
+
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
+{
+ int res;
+
+ local_bh_disable();
+ res = percpu_counter_sum_positive(&nf->mem);
+ local_bh_enable();
+
+ return res;
+}
+
+static inline void inet_frag_lru_move(struct inet_frag_queue *q)
+{
+ spin_lock(&q->net->lru_lock);
+ if (!list_empty(&q->lru_list))
+ list_move_tail(&q->lru_list, &q->net->lru_list);
+ spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_del(struct inet_frag_queue *q)
+{
+ spin_lock(&q->net->lru_lock);
+ list_del_init(&q->lru_list);
+ q->net->nqueues--;
+ spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_add(struct netns_frags *nf,
+ struct inet_frag_queue *q)
+{
+ spin_lock(&nf->lru_lock);
+ list_add_tail(&q->lru_list, &nf->lru_list);
+ q->net->nqueues++;
+ spin_unlock(&nf->lru_lock);
+}
+
+/* RFC 3168 support:
+ * We want to check ECN values of all fragments, to detect invalid combinations.
+ * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
+ */
+#define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
+#define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
+#define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
+#define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
+
+extern const u8 ip_frag_ecn_table[16];
+
#endif
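
A hedged sketch, not part of the patch above, of the accounting pattern the percpu_counter and LRU helpers imply for a reassembly queue: charge truesize when a fragment is queued and keep the queue warm on the per-netns LRU list. nf->high_thresh is an existing netns_frags sysctl not visible in this hunk; the function name and eviction trigger are illustrative, while frag_mem_limit(), add_frag_mem_limit(), inet_frag_lru_move() and inet_frag_evictor() are the helpers declared here.

static void example_charge_fragment(struct inet_frag_queue *q,
				    struct netns_frags *nf,
				    struct inet_frags *f,
				    struct sk_buff *skb)
{
	/* Evict old queues once the (approximate) per-netns limit is hit. */
	if (frag_mem_limit(nf) > nf->high_thresh)
		inet_frag_evictor(nf, f, false);

	/* Charge truesize to the batched percpu counter ... */
	add_frag_mem_limit(q, skb->truesize);
	/* ... and keep this queue at the tail of the LRU list. */
	inet_frag_lru_move(q);
}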
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index e9c2ed8af86..dd1950a7e27 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -33,16 +33,15 @@
#include <net/tcp_states.h>
#include <net/netns/hash.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
- * One chain is dedicated to TIME_WAIT sockets.
- * I'll experiment with dynamic table growth later.
+ * The 'e' prefix stands for Established, but we really put all sockets
+ * except LISTEN ones here.
*/
struct inet_ehash_bucket {
struct hlist_nulls_head chain;
- struct hlist_nulls_head twchain;
};
/* There are a few simple rules, which allow for local port reuse by
@@ -81,7 +80,9 @@ struct inet_bind_bucket {
struct net *ib_net;
#endif
unsigned short port;
- signed short fastreuse;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
int num_owners;
struct hlist_node node;
struct hlist_head owners;
@@ -92,8 +93,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
return read_pnet(&ib->ib_net);
}
-#define inet_bind_bucket_for_each(tb, pos, head) \
- hlist_for_each_entry(tb, pos, head, node)
+#define inet_bind_bucket_for_each(tb, head) \
+ hlist_for_each_entry(tb, head, node)
struct inet_bind_hashbucket {
spinlock_t lock;
@@ -121,7 +122,6 @@ struct inet_hashinfo {
*
* TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
- * TIME_WAIT sockets use a separate chain (twchain).
*/
struct inet_ehash_bucket *ehash;
spinlock_t *ehash_locks;
@@ -216,22 +216,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
}
}
-extern struct inet_bind_bucket *
- inet_bind_bucket_create(struct kmem_cache *cachep,
- struct net *net,
- struct inet_bind_hashbucket *head,
- const unsigned short snum);
-extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
- struct inet_bind_bucket *tb);
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ const unsigned short snum);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind_bucket *tb);
-static inline int inet_bhashfn(struct net *net,
- const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net, const __u16 lport,
+ const int bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ const unsigned short snum);
/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(struct net *net, const unsigned short num)
@@ -245,27 +244,30 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
}
/* Caller must disable local BH processing. */
-extern int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(struct sock *sk, struct sock *child);
-extern void inet_put_port(struct sock *sk);
+void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
-extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-extern void inet_hash(struct sock *sk);
-extern void inet_unhash(struct sock *sk);
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+void inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
-extern struct sock *__inet_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 daddr,
- const unsigned short hnum,
- const int dif);
+struct sock *__inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr,
+ const unsigned short hnum,
+ const int dif);
static inline struct sock *inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
+ __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
- return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
+ return __inet_lookup_listener(net, hashinfo, saddr, sport,
+ daddr, ntohs(dport), dif);
}
/* Socket demux engine toys. */
@@ -277,7 +279,6 @@ static inline struct sock *inet_lookup_listener(struct net *net,
On 64bit targets we combine comparisons with pair of adjacent __be32
fields in the same way.
*/
-typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
@@ -287,42 +288,34 @@ typedef __u32 __bitwise __portpair;
#endif
#if (BITS_PER_LONG == 64)
-typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
(((__force __u64)(__be32)(__saddr)) << 32) | \
- ((__force __u64)(__be32)(__daddr)));
+ ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
(((__force __u64)(__be32)(__daddr)) << 32) | \
- ((__force __u64)(__be32)(__saddr)));
+ ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
- ((*((__addrpair *)&(inet_sk(__sk)->inet_daddr))) == (__cookie)) && \
- ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
- ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
- ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_addrpair == (__cookie)) && \
+ (!(__sk)->sk_bound_dev_if || \
+ ((__sk)->sk_bound_dev_if == (__dif))) && \
+ net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
-#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
- (inet_sk(__sk)->inet_daddr == (__saddr)) && \
- (inet_sk(__sk)->inet_rcv_saddr == (__daddr)) && \
- ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \
- (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
- (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
- ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
+#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
+ const int __name __deprecated __attribute__((unused))
+
+#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_daddr == (__saddr)) && \
+ ((__sk)->sk_rcv_saddr == (__daddr)) && \
+ (!(__sk)->sk_bound_dev_if || \
+ ((__sk)->sk_bound_dev_if == (__dif))) && \
+ net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
/*
@@ -331,10 +324,11 @@ typedef __u64 __bitwise __addrpair;
*
* Local BH must be disabled here.
*/
-extern struct sock * __inet_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 saddr, const __be16 sport,
- const __be32 daddr, const u16 hnum, const int dif);
+struct sock *__inet_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 hnum,
+ const int dif);
static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -356,7 +350,8 @@ static inline struct sock *__inet_lookup(struct net *net,
struct sock *sk = __inet_lookup_established(net, hashinfo,
saddr, sport, daddr, hnum, dif);
- return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
+ return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport,
+ daddr, hnum, dif);
}
static inline struct sock *inet_lookup(struct net *net,
@@ -379,10 +374,10 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const __be16 sport,
const __be16 dport)
{
- struct sock *sk;
+ struct sock *sk = skb_steal_sock(skb);
const struct iphdr *iph = ip_hdr(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
+ if (sk)
return sk;
else
return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
@@ -390,13 +385,14 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
iph->daddr, dport, inet_iif(skb));
}
-extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk,
- u32 port_offset,
- int (*check_established)(struct inet_timewait_death_row *,
- struct sock *, __u16, struct inet_timewait_sock **),
- int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk, u32 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16,
+ struct inet_timewait_sock **),
+ int (*hash)(struct sock *sk,
+ struct inet_timewait_sock *twp));
-extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk);
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk);
#endif /* _INET_HASHTABLES_H */
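
A hedged sketch, not part of the patch above, showing the shape of an established-table walk with the reworked INET_ADDR_COOKIE()/INET_MATCH(): the __hash argument is gone, and there is no separate TIME_WAIT variant now that twchain has been removed. The slot selection and refcounting are elided and the function name is illustrative; the macros, types and list walker are assumed to be the in-tree ones.

static struct sock *example_ehash_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 unsigned int slot,
					 __be32 saddr, __be16 sport,
					 __be32 daddr, u16 hnum, int dif)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
	const struct hlist_nulls_node *node;
	struct sock *sk;
	INET_ADDR_COOKIE(acookie, saddr, daddr);

	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif))
			return sk;	/* caller still takes a reference */
	}
	return NULL;
}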
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 8945f9fb192..b1edf17bec0 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -31,7 +31,7 @@
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
- * @is_data - Options in __data, rather than skb
+ * @nexthop - Saved nexthop address in LSRR and SSRR
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was ours
* @is_changed - IP checksum is no longer valid
@@ -41,6 +41,7 @@
*/
struct ip_options {
__be32 faddr;
+ __be32 nexthop;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
@@ -57,17 +58,26 @@ struct ip_options {
unsigned char __data[0];
};
-#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
+struct ip_options_rcu {
+ struct rcu_head rcu;
+ struct ip_options opt;
+};
+
+struct ip_options_data {
+ struct ip_options_rcu opt;
+ char data[40];
+};
struct inet_request_sock {
struct request_sock req;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- u16 inet6_rsk_offset;
-#endif
- __be16 loc_port;
- __be32 loc_addr;
- __be32 rmt_addr;
- __be16 rmt_port;
+#define ir_loc_addr req.__req_common.skc_rcv_saddr
+#define ir_rmt_addr req.__req_common.skc_daddr
+#define ir_num req.__req_common.skc_num
+#define ir_rmt_port req.__req_common.skc_dport
+#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
+#define ir_iif req.__req_common.skc_bound_dev_if
+
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
rcv_wscale : 4,
@@ -78,7 +88,9 @@ struct inet_request_sock {
acked : 1,
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
- struct ip_options *opt;
+ struct ip_options_rcu *opt;
+ struct sk_buff *pktopts;
+ u32 ir_mark;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -86,6 +98,33 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
return (struct inet_request_sock *)sk;
}
+static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+{
+ if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+ return skb->mark;
+ } else {
+ return sk->sk_mark;
+ }
+}
+
+struct inet_cork {
+ unsigned int flags;
+ __be32 addr;
+ struct ip_options *opt;
+ unsigned int fragsize;
+ int length; /* Total length of all frames */
+ struct dst_entry *dst;
+ u8 tx_flags;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
+};
+
+struct inet_cork_full {
+ struct inet_cork base;
+ struct flowi fl;
+};
+
struct ip_mc_socklist;
struct ipv6_pinfo;
struct rtable;
@@ -105,6 +144,7 @@ struct rtable;
* @tos - TOS
* @mc_ttl - Multicasting TTL
* @is_icsk - is this an inet_connection_sock?
+ * @uc_index - Unicast outgoing device index
* @mc_index - Multicast device index
* @mc_list - Group array
* @cork - info to build ip hdr on each ip frag while socket is corked
@@ -112,21 +152,23 @@ struct rtable;
struct inet_sock {
/* sk and pinet6 has to be the first two members of inet_sock */
struct sock sk;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct ipv6_pinfo *pinet6;
#endif
/* Socket demultiplex comparisons on incoming packets. */
- __be32 inet_daddr;
- __be32 inet_rcv_saddr;
- __be16 inet_dport;
- __u16 inet_num;
+#define inet_daddr sk.__sk_common.skc_daddr
+#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
+#define inet_dport sk.__sk_common.skc_dport
+#define inet_num sk.__sk_common.skc_num
+
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
__be16 inet_sport;
__u16 inet_id;
- struct ip_options *opt;
+ struct ip_options_rcu __rcu *inet_opt;
+ int rx_dst_ifindex;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
@@ -139,18 +181,12 @@ struct inet_sock {
transparent:1,
mc_all:1,
nodefrag:1;
+ __u8 rcv_tos;
+ int uc_index;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist __rcu *mc_list;
- struct {
- unsigned int flags;
- unsigned int fragsize;
- struct ip_options *opt;
- struct dst_entry *dst;
- int length; /* Total length of all frames */
- __be32 addr;
- struct flowi fl;
- } cork;
+ struct inet_cork_full cork;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
@@ -168,7 +204,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
-#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
+#if !(IS_ENABLED(CONFIG_IPV6))
static inline void inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
@@ -176,31 +212,18 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
}
#endif
-extern int inet_sk_rebuild_header(struct sock *sk);
+int inet_sk_rebuild_header(struct sock *sk);
-extern u32 inet_ehash_secret;
-extern void build_ehash_secret(void);
-
-static inline unsigned int inet_ehashfn(struct net *net,
- const __be32 laddr, const __u16 lport,
- const __be32 faddr, const __be16 fport)
+static inline unsigned int __inet_ehashfn(const __be32 laddr,
+ const __u16 lport,
+ const __be32 faddr,
+ const __be16 fport,
+ u32 initval)
{
return jhash_3words((__force __u32) laddr,
(__force __u32) faddr,
((__u32) lport) << 16 | (__force __u32)fport,
- inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet_sk_ehashfn(const struct sock *sk)
-{
- const struct inet_sock *inet = inet_sk(sk);
- const __be32 laddr = inet->inet_rcv_saddr;
- const __u16 lport = inet->inet_num;
- const __be32 faddr = inet->inet_daddr;
- const __be16 fport = inet->inet_dport;
- struct net *net = sock_net(sk);
-
- return inet_ehashfn(net, laddr, lport, faddr, fport);
+ initval);
}
static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
@@ -218,7 +241,11 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
- return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+ __u8 flags = 0;
+
+ if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
+ flags |= FLOWI_FLAG_ANYSRC;
+ return flags;
}
#endif /* _INET_SOCK_H */
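
A hedged sketch, not part of the patch above: with inet_ehashfn() and the global inet_ehash_secret gone from this header, each user now feeds its own secret into __inet_ehashfn(). The wrapper name below and the net_hash_mix() perturbation are assumptions; __inet_ehashfn() comes from the hunk above, and the one-time seeding with net_get_random_once() mirrors the pattern the inet_frag.h comment earlier in this patch recommends.

static unsigned int example_ehashfn(struct net *net,
				    const __be32 laddr, const __u16 lport,
				    const __be32 faddr, const __be16 fport)
{
	static u32 example_ehash_secret __read_mostly;

	net_get_random_once(&example_ehash_secret,
			    sizeof(example_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      example_ehash_secret + net_hash_mix(net));
}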
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index a066fdd50da..61474ea0215 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -18,7 +18,6 @@
#include <linux/kmemcheck.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -28,7 +27,7 @@
#include <net/tcp_states.h>
#include <net/timewait_sock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct inet_hashinfo;
@@ -59,6 +58,11 @@ struct inet_hashinfo;
# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#endif
+static inline u32 inet_tw_time_stamp(void)
+{
+ return jiffies;
+}
+
/* TIME_WAIT reaping mechanism. */
#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
@@ -84,15 +88,9 @@ struct inet_timewait_death_row {
int sysctl_max_tw_buckets;
};
-extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(struct work_struct *work);
-extern void inet_twdr_twcal_tick(unsigned long data);
-
-#if (BITS_PER_LONG == 64)
-#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
-#else
-#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
-#endif
+void inet_twdr_hangman(unsigned long data);
+void inet_twdr_twkill_work(struct work_struct *work);
+void inet_twdr_twcal_tick(unsigned long data);
struct inet_bind_bucket;
@@ -117,40 +115,33 @@ struct inet_timewait_sock {
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
+#define tw_daddr __tw_common.skc_daddr
+#define tw_v6_daddr __tw_common.skc_v6_daddr
+#define tw_rcv_saddr __tw_common.skc_rcv_saddr
+#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
+#define tw_dport __tw_common.skc_dport
+#define tw_num __tw_common.skc_num
+
int tw_timeout;
volatile unsigned char tw_substate;
- /* 3 bits hole, try to pack */
unsigned char tw_rcv_wscale;
+
/* Socket demultiplex comparisons on incoming packets. */
- /* these five are in inet_sock */
+ /* these three are in inet_sock */
__be16 tw_sport;
- __be32 tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
- __be32 tw_rcv_saddr;
- __be16 tw_dport;
- __u16 tw_num;
kmemcheck_bitfield_begin(flags);
/* And these are ours. */
unsigned int tw_ipv6only : 1,
tw_transparent : 1,
- tw_pad : 14, /* 14 bits hole */
- tw_ipv6_offset : 16;
+ tw_flowlabel : 20,
+ tw_pad : 2, /* 2 bits hole */
+ tw_tos : 8;
kmemcheck_bitfield_end(flags);
- unsigned long tw_ttd;
+ u32 tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
};
-
-static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
- struct hlist_nulls_head *list)
-{
- hlist_nulls_add_head_rcu(&tw->tw_node, list);
-}
-
-static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
- struct hlist_head *list)
-{
- hlist_add_head(&tw->tw_bind_node, list);
-}
+#define tw_tclass tw_tos
static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
{
@@ -180,63 +171,49 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
#define inet_twsk_for_each(tw, node, head) \
hlist_nulls_for_each_entry(tw, node, head, tw_node)
-#define inet_twsk_for_each_inmate(tw, node, jail) \
- hlist_for_each_entry(tw, node, jail, tw_death_node)
+#define inet_twsk_for_each_inmate(tw, jail) \
+ hlist_for_each_entry(tw, jail, tw_death_node)
-#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
- hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
+#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
+ hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
}
-static inline __be32 inet_rcv_saddr(const struct sock *sk)
-{
- return likely(sk->sk_state != TCP_TIME_WAIT) ?
- inet_sk(sk)->inet_rcv_saddr : inet_twsk(sk)->tw_rcv_saddr;
-}
-
-extern void inet_twsk_put(struct inet_timewait_sock *tw);
+void inet_twsk_free(struct inet_timewait_sock *tw);
+void inet_twsk_put(struct inet_timewait_sock *tw);
-extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
+int inet_twsk_unhash(struct inet_timewait_sock *tw);
-extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
- struct inet_hashinfo *hashinfo);
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
-extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
- const int state);
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ const int state);
-extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
- struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ struct inet_hashinfo *hashinfo);
-extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr,
- const int timeo, const int timewait_len);
-extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw,
+ struct inet_timewait_death_row *twdr,
+ const int timeo, const int timewait_len);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw,
+ struct inet_timewait_death_row *twdr);
-extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
- struct inet_timewait_death_row *twdr, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+ struct inet_timewait_death_row *twdr, int family);
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
-#ifdef CONFIG_NET_NS
- return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
- /* reference counting, */
- /* initialization, or RCU. */
-#else
- return &init_net;
-#endif
+ return read_pnet(&twsk->tw_net);
}
static inline
void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
{
-#ifdef CONFIG_NET_NS
- rcu_assign_pointer(twsk->tw_net, net);
-#endif
+ write_pnet(&twsk->tw_net, net);
}
#endif /* _INET_TIMEWAIT_SOCK_ */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 599d96e7411..01d590ee5e7 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,15 +11,20 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
#include <net/ipv6.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
-struct inetpeer_addr {
+struct inetpeer_addr_base {
union {
- __be32 a4;
- __be32 a6[4];
+ __be32 a4;
+ __be32 a6[4];
};
- __u16 family;
+};
+
+struct inetpeer_addr {
+ struct inetpeer_addr_base addr;
+ __u16 family;
};
struct inet_peer {
@@ -27,68 +32,143 @@ struct inet_peer {
struct inet_peer __rcu *avl_left, *avl_right;
struct inetpeer_addr daddr;
__u32 avl_height;
- struct list_head unused;
- __u32 dtime; /* the time of last use of not
- * referenced entries */
- atomic_t refcnt;
+
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
+ unsigned long rate_last;
+ union {
+ struct list_head gc_list;
+ struct rcu_head gc_rcu;
+ };
/*
- * Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
- * We can share memory with rcu_head to keep inet_peer small
+ * Once inet_peer is queued for deletion (refcnt == -1), the following field
+ * is not available: rid
+ * We can share memory with rcu_head to help keep inet_peer small.
*/
union {
struct {
- atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
+ atomic_t rid; /* Frag reception counter */
};
struct rcu_head rcu;
+ struct inet_peer *gc_next;
};
+
+ /* following fields might be frequently dirtied */
+ __u32 dtime; /* the time of last use of not referenced entries */
+ atomic_t refcnt;
+};
+
+struct inet_peer_base {
+ struct inet_peer __rcu *root;
+ seqlock_t lock;
+ u32 flush_seq;
+ int total;
};
-void inet_initpeers(void) __init;
+#define INETPEER_BASE_BIT 0x1UL
+
+static inline struct inet_peer *inetpeer_ptr(unsigned long val)
+{
+ BUG_ON(val & INETPEER_BASE_BIT);
+ return (struct inet_peer *) val;
+}
+
+static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
+{
+ if (!(val & INETPEER_BASE_BIT))
+ return NULL;
+ val &= ~INETPEER_BASE_BIT;
+ return (struct inet_peer_base *) val;
+}
+
+static inline bool inetpeer_ptr_is_peer(unsigned long val)
+{
+ return !(val & INETPEER_BASE_BIT);
+}
+
+static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
+{
+ /* This implicitly clears INETPEER_BASE_BIT */
+ *val = (unsigned long) peer;
+}
+
+static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
+{
+ unsigned long val = (unsigned long) peer;
+ unsigned long orig = *ptr;
+
+ if (!(orig & INETPEER_BASE_BIT) ||
+ cmpxchg(ptr, orig, val) != orig)
+ return false;
+ return true;
+}
+
+static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
+{
+ *ptr = (unsigned long) base | INETPEER_BASE_BIT;
+}
+
+static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
+{
+ unsigned long val = *from;
+
+ *to = val;
+ if (inetpeer_ptr_is_peer(val)) {
+ struct inet_peer *peer = inetpeer_ptr(val);
+ atomic_inc(&peer->refcnt);
+ }
+}
+
+void inet_peer_base_init(struct inet_peer_base *);
+
+void inet_initpeers(void) __init;
+
+#define INETPEER_METRICS_NEW (~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+ return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
/* can be called with or without local BH being disabled */
-struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create);
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ const struct inetpeer_addr *daddr,
+ int create);
-static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
+static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
+ __be32 v4daddr,
+ int create)
{
struct inetpeer_addr daddr;
- daddr.a4 = v4daddr;
+ daddr.addr.a4 = v4daddr;
daddr.family = AF_INET;
- return inet_getpeer(&daddr, create);
+ return inet_getpeer(base, &daddr, create);
}
-static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int create)
+static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
+ const struct in6_addr *v6daddr,
+ int create)
{
struct inetpeer_addr daddr;
- ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+ *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
daddr.family = AF_INET6;
- return inet_getpeer(&daddr, create);
+ return inet_getpeer(base, &daddr, create);
}
/* can be called from BH context or outside */
-extern void inet_putpeer(struct inet_peer *p);
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+
+void inetpeer_invalidate_tree(struct inet_peer_base *);
/*
- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * temporary check to make sure we don't access rid, tcp_ts,
* tcp_ts_stamp if no refcount is taken on inet_peer
*/
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}
-
-
-/* can be called with or without local BH being disabled */
-static inline __u16 inet_getid(struct inet_peer *p, int more)
-{
- more++;
- inet_peer_refcheck(p);
- return atomic_add_return(more, &p->ip_id_count) - more;
-}
-
#endif /* _NET_INETPEER_H */
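
A hedged sketch, not part of the patch above, of the tagged-pointer scheme the INETPEER_BASE_BIT helpers implement: a single unsigned long holds either an inet_peer_base pointer (base bit set) before a peer is bound, or a plain inet_peer pointer afterwards. The example_route container and field name are illustrative; the helpers, inet_getpeer_v4() and inet_putpeer() are the ones declared in this header.

struct example_route {
	unsigned long rt_peer;	/* tagged inet_peer_base * or inet_peer * */
};

static struct inet_peer *example_route_get_peer(struct example_route *rt,
						__be32 daddr, int create)
{
	struct inet_peer_base *base;
	struct inet_peer *peer;

	if (inetpeer_ptr_is_peer(rt->rt_peer))
		return inetpeer_ptr(rt->rt_peer);

	base = inetpeer_base_ptr(rt->rt_peer);
	if (!base)
		return NULL;

	peer = inet_getpeer_v4(base, daddr, create);
	if (peer && !inetpeer_ptr_set_peer(&rt->rt_peer, peer)) {
		/* Lost the race with another setter; drop our reference
		 * and return whatever peer is installed now. */
		inet_putpeer(peer);
		peer = inetpeer_ptr(rt->rt_peer);
	}
	return peer;
}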
diff --git a/include/net/ip.h b/include/net/ip.h
index 86e2b182a0c..7596eb22e1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -28,6 +28,7 @@
#include <linux/skbuff.h>
#include <net/inet_sock.h>
+#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
@@ -42,6 +43,8 @@ struct inet_skb_parm {
#define IPSKB_XFRM_TRANSFORMED 4
#define IPSKB_FRAG_COMPLETE 8
#define IPSKB_REROUTED 16
+
+ u16 frag_max_size;
};
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
@@ -52,11 +55,15 @@ static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
struct ipcm_cookie {
__be32 addr;
int oif;
- struct ip_options *opt;
+ struct ip_options_rcu *opt;
__u8 tx_flags;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
};
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
+#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
struct ip_ra_chain {
struct ip_ra_chain __rcu *next;
@@ -84,62 +91,77 @@ struct packet_type;
struct rtable;
struct sockaddr;
-extern int igmp_mc_proc_init(void);
+int igmp_mc_init(void);
/*
* Functions provided by ip.c
*/
-extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
- __be32 saddr, __be32 daddr,
- struct ip_options *opt);
-extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-extern int ip_local_deliver(struct sk_buff *skb);
-extern int ip_mr_input(struct sk_buff *skb);
-extern int ip_output(struct sk_buff *skb);
-extern int ip_mc_output(struct sk_buff *skb);
-extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-extern int ip_do_nat(struct sk_buff *skb);
-extern void ip_send_check(struct iphdr *ip);
-extern int __ip_local_out(struct sk_buff *skb);
-extern int ip_local_out(struct sk_buff *skb);
-extern int ip_queue_xmit(struct sk_buff *skb);
-extern void ip_init(void);
-extern int ip_append_data(struct sock *sk,
- int getfrag(void *from, char *to, int offset, int len,
- int odd, struct sk_buff *skb),
- void *from, int len, int protolen,
- struct ipcm_cookie *ipc,
- struct rtable **rt,
- unsigned int flags);
-extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
-extern ssize_t ip_append_page(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-extern int ip_push_pending_frames(struct sock *sk);
-extern void ip_flush_pending_frames(struct sock *sk);
+int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ __be32 saddr, __be32 daddr,
+ struct ip_options_rcu *opt);
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev);
+int ip_local_deliver(struct sk_buff *skb);
+int ip_mr_input(struct sk_buff *skb);
+int ip_output(struct sock *sk, struct sk_buff *skb);
+int ip_mc_output(struct sock *sk, struct sk_buff *skb);
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_do_nat(struct sk_buff *skb);
+void ip_send_check(struct iphdr *ip);
+int __ip_local_out(struct sk_buff *skb);
+int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
+static inline int ip_local_out(struct sk_buff *skb)
+{
+ return ip_local_out_sk(skb->sk, skb);
+}
-/* datagram.c */
-extern int ip4_datagram_connect(struct sock *sk,
- struct sockaddr *uaddr, int addr_len);
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+void ip_init(void);
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int len, int protolen,
+ struct ipcm_cookie *ipc,
+ struct rtable **rt,
+ unsigned int flags);
+int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb);
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+ int offset, size_t size, int flags);
+struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork);
+int ip_send_skb(struct net *net, struct sk_buff *skb);
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
+void ip_flush_pending_frames(struct sock *sk);
+struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ struct ipcm_cookie *ipc, struct rtable **rtp,
+ unsigned int flags);
+
+static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
+{
+ return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
+}
-/*
- * Map a multicast IP onto multicast MAC for type Token Ring.
- * This conforms to RFC1469 Option 2 Multicasting i.e.
- * using a functional address to transmit / receive
- * multicast packets.
- */
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+{
+ return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
-static inline void ip_tr_mc_map(__be32 addr, char *buf)
+static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
- buf[0]=0xC0;
- buf[1]=0x00;
- buf[2]=0x00;
- buf[3]=0x04;
- buf[4]=0x00;
- buf[5]=0x00;
+ return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}
+/* datagram.c */
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+void ip4_datagram_release_cb(struct sock *sk);
+
struct ip_reply_arg {
struct kvec iov[1];
int flags;
@@ -147,6 +169,7 @@ struct ip_reply_arg {
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
/* -1 if not needed */
int bound_dev_if;
+ u8 tos;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
@@ -156,15 +179,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
- unsigned int len);
-
-struct ipv4_config {
- int log_martians;
- int no_pmtu_disc;
-};
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len);
-extern struct ipv4_config ipv4_config;
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
@@ -174,52 +192,60 @@ extern struct ipv4_config ipv4_config;
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
-extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
-extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
-static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
}
#endif
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
-extern void snmp_mib_free(void __percpu *ptr[2]);
-extern struct local_ports {
- seqlock_t lock;
- int range[2];
-} sysctl_local_ports;
-extern void inet_get_local_port_range(int *low, int *high);
+void inet_get_local_port_range(struct net *net, int *low, int *high);
-extern unsigned long *sysctl_local_reserved_ports;
-static inline int inet_is_reserved_local_port(int port)
+#ifdef CONFIG_SYSCTL
+static inline int inet_is_local_reserved_port(struct net *net, int port)
{
- return test_bit(port, sysctl_local_reserved_ports);
+ if (!net->ipv4.sysctl_local_reserved_ports)
+ return 0;
+ return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
+#else
+static inline int inet_is_local_reserved_port(struct net *net, int port)
+{
+ return 0;
+}
+#endif
-extern int sysctl_ip_default_ttl;
extern int sysctl_ip_nonlocal_bind;
-extern struct ctl_path net_core_path[];
-extern struct ctl_path net_ipv4_ctl_path[];
-
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
-extern int inet_peer_gc_mintime;
-extern int inet_peer_gc_maxtime;
+
+/* From ip_input.c */
+extern int sysctl_ip_early_demux;
/* From ip_output.c */
extern int sysctl_ip_dynaddr;
-extern void ipfrag_init(void);
+void ipfrag_init(void);
+
+void ip_static_sysctl_init(void);
-extern void ip_static_sysctl_init(void);
+#define IP4_REPLY_MARK(net, mark) \
+ ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+
+static inline bool ip_is_fragment(const struct iphdr *iph)
+{
+ return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
+}
#ifdef CONFIG_INET
#include <net/dst.h>
@@ -243,32 +269,79 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
!(dst_metric_locked(dst, RTAX_MTU)));
}
-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+static inline bool ip_sk_accept_pmtu(const struct sock *sk)
+{
+ return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
+ inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
+}
+
+static inline bool ip_sk_use_pmtu(const struct sock *sk)
+{
+ return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
+}
-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+static inline bool ip_sk_ignore_df(const struct sock *sk)
{
- if (iph->frag_off & htons(IP_DF)) {
+ return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
+ inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+}
+
+static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ bool forwarding)
+{
+ struct net *net = dev_net(dst->dev);
+
+ if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+ dst_metric_locked(dst, RTAX_MTU) ||
+ !forwarding)
+ return dst_mtu(dst);
+
+ return min(dst->dev->mtu, IP_MAX_MTU);
+}
+
+static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+{
+ if (!skb->sk || ip_sk_use_pmtu(skb->sk)) {
+ bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+ return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ } else {
+ return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
+ }
+}
+
+u32 ip_idents_reserve(u32 hash, int segs);
+void __ip_select_ident(struct iphdr *iph, int segs);
+
+static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
/* This is only to work around buggy Windows95/2000
* VJ compression implementations. If the ID field
* does not change, they drop every other packet in
* a TCP stream using header compression.
*/
- iph->id = (sk && inet_sk(sk)->inet_daddr) ?
- htons(inet_sk(sk)->inet_id++) : 0;
- } else
- __ip_select_ident(iph, dst, 0);
-}
-
-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
-{
- if (iph->frag_off & htons(IP_DF)) {
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
- inet_sk(sk)->inet_id += 1 + more;
- } else
+ inet_sk(sk)->inet_id += segs;
+ } else {
iph->id = 0;
- } else
- __ip_select_ident(iph, dst, more);
+ }
+ } else {
+ __ip_select_ident(iph, segs);
+ }
+}
+
+static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+{
+ ip_select_ident_segs(skb, sk, 1);
+}
+
+static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ skb->len, proto, 0);
}
/*
@@ -324,19 +397,27 @@ static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, ch
buf[16] = addr & 0x0f;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
+{
+ if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
+ memcpy(buf, broadcast, 4);
+ else
+ memcpy(buf, &naddr, sizeof(naddr));
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif
static __inline__ void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
- memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+ memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
}
#endif
}
@@ -350,7 +431,7 @@ static inline int sk_mc_loop(struct sock *sk)
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
@@ -359,7 +440,7 @@ static inline int sk_mc_loop(struct sock *sk)
return 1;
}
-extern int ip_call_ra_chain(struct sk_buff *skb);
+bool ip_call_ra_chain(struct sk_buff *skb);
/*
* Functions provided by ip_fragment.c
@@ -376,10 +457,20 @@ enum ip_defrag_users {
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
- IP_DEFRAG_VS_FWD
+ IP_DEFRAG_VS_FWD,
+ IP_DEFRAG_AF_PACKET,
+ IP_DEFRAG_MACVLAN,
};
int ip_defrag(struct sk_buff *skb, u32 user);
+#ifdef CONFIG_INET
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+#else
+static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+ return skb;
+}
+#endif
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
@@ -387,58 +478,53 @@ int ip_frag_nqueues(struct net *net);
* Functions provided by ip_forward.c
*/
-extern int ip_forward(struct sk_buff *skb);
+int ip_forward(struct sk_buff *skb);
/*
* Functions provided by ip_options.c
*/
-extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag);
-extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
-extern void ip_options_fragment(struct sk_buff *skb);
-extern int ip_options_compile(struct net *net,
- struct ip_options *opt, struct sk_buff *skb);
-extern int ip_options_get(struct net *net, struct ip_options **optp,
- unsigned char *data, int optlen);
-extern int ip_options_get_from_user(struct net *net, struct ip_options **optp,
- unsigned char __user *data, int optlen);
-extern void ip_options_undo(struct ip_options * opt);
-extern void ip_forward_options(struct sk_buff *skb);
-extern int ip_options_rcv_srr(struct sk_buff *skb);
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+ __be32 daddr, struct rtable *rt, int is_frag);
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+void ip_options_fragment(struct sk_buff *skb);
+int ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb);
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+ unsigned char *data, int optlen);
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+ unsigned char __user *data, int optlen);
+void ip_options_undo(struct ip_options *opt);
+void ip_forward_options(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb);
/*
* Functions provided by ip_sockglue.c
*/
-extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-extern int ip_cmsg_send(struct net *net,
- struct msghdr *msg, struct ipcm_cookie *ipc);
-extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
-extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
-extern int compat_ip_setsockopt(struct sock *sk, int level,
- int optname, char __user *optval, unsigned int optlen);
-extern int compat_ip_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval, int __user *optlen);
-extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
-
-extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
- __be16 port, u32 info, u8 *payload);
-extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
- u32 info);
-
-/* sysctl helpers - any sysctl which holds a value that ends up being
- * fed into the routing cache should use these handlers.
- */
-int ipv4_doint_and_flush(ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
-int ipv4_doint_and_flush_strategy(ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+int ip_cmsg_send(struct net *net, struct msghdr *msg,
+ struct ipcm_cookie *ipc, bool allow_ipv6);
+int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+ unsigned int optlen);
+int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
+ int __user *optlen);
+int compat_ip_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_ip_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int ip_ra_control(struct sock *sk, unsigned char on,
+ void (*destructor)(struct sock *));
+
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+ u32 info);
+
#ifdef CONFIG_PROC_FS
-extern int ip_misc_proc_init(void);
+int ip_misc_proc_init(void);
#endif
#endif /* _IP_H */
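
The new IP_DEFRAG_AF_PACKET and IP_DEFRAG_MACVLAN users pair with ip_check_defrag(), which, unlike ip_defrag(), only reassembles when the skb really is a fragment and otherwise hands it back untouched. A minimal caller sketch under that assumption follows; example_rcv() and example_deliver() are hypothetical and not part of this patch.

	/* Hedged sketch: a receive path that only wants complete datagrams.
	 * ip_check_defrag() returns NULL while fragments are being queued,
	 * the reassembled skb once the datagram is complete, or the original
	 * skb unchanged if it was never fragmented.
	 */
	static int example_rcv(struct sk_buff *skb)
	{
		if (skb->protocol == htons(ETH_P_IP)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;	/* fragment absorbed, wait for the rest */
		}
		return example_deliver(skb);	/* hypothetical consumer */
	}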
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index bc1b0fda2b0..55236cb7117 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -31,64 +31,68 @@
#include <net/ip.h>
#include <asm/checksum.h>
#include <linux/in6.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
#ifndef _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum csum);
+#endif
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum csum)
+static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
{
+ return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len, proto, 0));
+}
- int carry;
- __u32 ulen;
- __u32 uproto;
- __u32 sum = (__force u32)csum;
-
- sum += (__force u32)saddr->s6_addr32[0];
- carry = (sum < (__force u32)saddr->s6_addr32[0]);
- sum += carry;
-
- sum += (__force u32)saddr->s6_addr32[1];
- carry = (sum < (__force u32)saddr->s6_addr32[1]);
- sum += carry;
-
- sum += (__force u32)saddr->s6_addr32[2];
- carry = (sum < (__force u32)saddr->s6_addr32[2]);
- sum += carry;
-
- sum += (__force u32)saddr->s6_addr32[3];
- carry = (sum < (__force u32)saddr->s6_addr32[3]);
- sum += carry;
-
- sum += (__force u32)daddr->s6_addr32[0];
- carry = (sum < (__force u32)daddr->s6_addr32[0]);
- sum += carry;
-
- sum += (__force u32)daddr->s6_addr32[1];
- carry = (sum < (__force u32)daddr->s6_addr32[1]);
- sum += carry;
-
- sum += (__force u32)daddr->s6_addr32[2];
- carry = (sum < (__force u32)daddr->s6_addr32[2]);
- sum += carry;
+static __inline__ __sum16 tcp_v6_check(int len,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __wsum base)
+{
+ return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
+}
- sum += (__force u32)daddr->s6_addr32[3];
- carry = (sum < (__force u32)daddr->s6_addr32[3]);
- sum += carry;
+static inline void __tcp_v6_send_check(struct sk_buff *skb,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
+{
+ struct tcphdr *th = tcp_hdr(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ } else {
+ th->check = tcp_v6_check(skb->len, saddr, daddr,
+ csum_partial(th, th->doff << 2,
+ skb->csum));
+ }
+}
- ulen = (__force u32)htonl((__u32) len);
- sum += ulen;
- carry = (sum < ulen);
- sum += carry;
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
- uproto = (__force u32)htonl(proto);
- sum += uproto;
- carry = (sum < uproto);
- sum += carry;
+ __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+}
+#endif
- return csum_fold((__force __wsum)sum);
+static inline __sum16 udp_v6_check(int len,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __wsum base)
+{
+ return csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
-#endif
+void udp6_set_csum(bool nocheck, struct sk_buff *skb,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int len);
+
+int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
#endif
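
udp_v6_check() is the UDP twin of tcp_v6_check(): both simply pass a protocol number to csum_ipv6_magic(). A hedged sketch of the software path that udp6_set_csum() is expected to cover, assuming the UDP header and payload are linear and len bytes long; example_udp6_csum() is illustrative only.

	/* Sketch: fill uh->check for an IPv6/UDP datagram entirely in software.
	 * A computed value of 0 must be folded to CSUM_MANGLED_0, because 0 is
	 * reserved to mean "no checksum" and IPv6 does not allow that for UDP.
	 */
	static void example_udp6_csum(struct udphdr *uh,
				      const struct in6_addr *saddr,
				      const struct in6_addr *daddr, int len)
	{
		uh->check = 0;
		uh->check = udp_v6_check(len, saddr, daddr,
					 csum_partial(uh, len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}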
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 708ff7cb880..9bcb220bd4a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -13,8 +13,6 @@
#ifndef _IP6_FIB_H
#define _IP6_FIB_H
-#ifdef __KERNEL__
-
#include <linux/ipv6_route.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
@@ -39,14 +37,18 @@ struct fib6_config {
int fc_ifindex;
u32 fc_flags;
u32 fc_protocol;
+ u32 fc_type; /* only 8 bits are used */
struct in6_addr fc_dst;
struct in6_addr fc_src;
+ struct in6_addr fc_prefsrc;
struct in6_addr fc_gateway;
unsigned long fc_expires;
struct nlattr *fc_mx;
int fc_mx_len;
+ int fc_mp_len;
+ struct nlattr *fc_mp;
struct nl_info fc_nlinfo;
};
@@ -87,10 +89,6 @@ struct fib6_table;
struct rt6_info {
struct dst_entry dst;
-#define rt6i_dev dst.dev
-#define rt6i_nexthop dst.neighbour
-#define rt6i_expires dst.expires
-
/*
* Tail elements of dst_entry (__refcnt etc.)
* and these elements (rarely used in hot path) are in
@@ -101,31 +99,112 @@ struct rt6_info {
struct in6_addr rt6i_gateway;
+ /* Multipath routes:
+	 * siblings is a list of rt6_info that have the same metric/weight,
+ * destination, but not the same gateway. nsiblings is just a cache
+ * to speed up lookup.
+ */
+ struct list_head rt6i_siblings;
+ unsigned int rt6i_nsiblings;
+
atomic_t rt6i_ref;
/* These are in a separate cache line. */
struct rt6key rt6i_dst ____cacheline_aligned_in_smp;
u32 rt6i_flags;
struct rt6key rt6i_src;
+ struct rt6key rt6i_prefsrc;
u32 rt6i_metric;
struct inet6_dev *rt6i_idev;
- struct inet_peer *rt6i_peer;
+ unsigned long _rt6i_peer;
+
+ u32 rt6i_genid;
-#ifdef CONFIG_XFRM
- u32 rt6i_flow_cache_genid;
-#endif
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
};
+static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
+{
+ return inetpeer_ptr(rt->_rt6i_peer);
+}
+
+static inline bool rt6_has_peer(struct rt6_info *rt)
+{
+ return inetpeer_ptr_is_peer(rt->_rt6i_peer);
+}
+
+static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
+{
+ __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
+}
+
+static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
+{
+ return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
+}
+
+static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
+{
+ inetpeer_init_ptr(&rt->_rt6i_peer, base);
+}
+
+static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
+{
+ inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
+}
+
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
}
+static inline void rt6_clean_expires(struct rt6_info *rt)
+{
+ rt->rt6i_flags &= ~RTF_EXPIRES;
+ rt->dst.expires = 0;
+}
+
+static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
+{
+ rt->dst.expires = expires;
+ rt->rt6i_flags |= RTF_EXPIRES;
+}
+
+static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
+{
+ struct rt6_info *rt;
+
+ for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES);
+ rt = (struct rt6_info *)rt->dst.from);
+ if (rt && rt != rt0)
+ rt0->dst.expires = rt->dst.expires;
+
+ dst_set_expires(&rt0->dst, timeout);
+ rt0->rt6i_flags |= RTF_EXPIRES;
+}
+
+static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+{
+ struct dst_entry *new = (struct dst_entry *) from;
+
+ rt->rt6i_flags &= ~RTF_EXPIRES;
+ dst_hold(new);
+ rt->dst.from = new;
+}
+
+static inline void ip6_rt_put(struct rt6_info *rt)
+{
+ /* dst_release() accepts a NULL parameter.
+	 * We rely on dst being the first member of struct rt6_info.

+ */
+ BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0);
+ dst_release(&rt->dst);
+}
+
struct fib6_walker_t {
struct list_head lh;
struct fib6_node *root, *node;
@@ -162,6 +241,7 @@ struct fib6_table {
u32 tb6_id;
rwlock_t tb6_lock;
struct fib6_node tb6_root;
+ struct inet_peer_base tb6_peers;
};
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
@@ -182,50 +262,46 @@ struct fib6_table {
typedef struct rt6_info *(*pol_lookup_t)(struct net *,
struct fib6_table *,
- struct flowi *, int);
+ struct flowi6 *, int);
/*
* exported functions
*/
-extern struct fib6_table *fib6_get_table(struct net *net, u32 id);
-extern struct fib6_table *fib6_new_table(struct net *net, u32 id);
-extern struct dst_entry *fib6_rule_lookup(struct net *net,
- struct flowi *fl, int flags,
- pol_lookup_t lookup);
+struct fib6_table *fib6_get_table(struct net *net, u32 id);
+struct fib6_table *fib6_new_table(struct net *net, u32 id);
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+ int flags, pol_lookup_t lookup);
-extern struct fib6_node *fib6_lookup(struct fib6_node *root,
- struct in6_addr *daddr,
- struct in6_addr *saddr);
+struct fib6_node *fib6_lookup(struct fib6_node *root,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
-struct fib6_node *fib6_locate(struct fib6_node *root,
- struct in6_addr *daddr, int dst_len,
- struct in6_addr *saddr, int src_len);
+struct fib6_node *fib6_locate(struct fib6_node *root,
+ const struct in6_addr *daddr, int dst_len,
+ const struct in6_addr *saddr, int src_len);
-extern void fib6_clean_all(struct net *net,
- int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg);
+void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
+ void *arg);
-extern int fib6_add(struct fib6_node *root,
- struct rt6_info *rt,
- struct nl_info *info);
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
+ struct nlattr *mx, int mx_len);
-extern int fib6_del(struct rt6_info *rt,
- struct nl_info *info);
+int fib6_del(struct rt6_info *rt, struct nl_info *info);
-extern void inet6_rt_notify(int event, struct rt6_info *rt,
- struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
-extern void fib6_run_gc(unsigned long expires,
- struct net *net);
+void fib6_run_gc(unsigned long expires, struct net *net, bool force);
-extern void fib6_gc_cleanup(void);
+void fib6_gc_cleanup(void);
-extern int fib6_init(void);
+int fib6_init(void);
+
+int ipv6_route_open(struct inode *inode, struct file *file);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern int fib6_rules_init(void);
-extern void fib6_rules_cleanup(void);
+int fib6_rules_init(void);
+void fib6_rules_cleanup(void);
#else
static inline int fib6_rules_init(void)
{
@@ -237,4 +313,3 @@ static inline void fib6_rules_cleanup(void)
}
#endif
#endif
-#endif
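
The rt6_set_expires()/rt6_clean_expires() pair keeps dst.expires and the RTF_EXPIRES flag in sync now that the rt6i_expires alias is gone. A hedged sketch of how router-advertisement style handling might use them; example_update_route_lifetime() and its lifetime encoding are assumptions, not code from this patch.

	/* Sketch: apply an RA-style lifetime to a route.  0xffffffff is taken
	 * to mean "infinite", in which case the expiry state is cleared.
	 */
	static void example_update_route_lifetime(struct rt6_info *rt, u32 lifetime)
	{
		if (lifetime == 0xffffffff)
			rt6_clean_expires(rt);
		else
			rt6_set_expires(rt, jiffies + (unsigned long)lifetime * HZ);
	}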
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index e06e0ca1e91..1d09b46c1e4 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -1,9 +1,6 @@
#ifndef _NET_IP6_ROUTE_H
#define _NET_IP6_ROUTE_H
-#define IP6_RT_PRIO_USER 1024
-#define IP6_RT_PRIO_ADDRCONF 256
-
struct route_info {
__u8 type;
__u8 length;
@@ -21,13 +18,12 @@ struct route_info {
__u8 prefix[0]; /* 0,8 or 16 */
};
-#ifdef __KERNEL__
-
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/route.h>
#define RT6_LOOKUP_F_IFACE 0x00000001
#define RT6_LOOKUP_F_REACHABLE 0x00000002
@@ -36,6 +32,11 @@ struct route_info {
#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
+ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
+ */
+#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
+
/*
* rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
* between IPV6_ADDR_PREFERENCES socket option values
@@ -55,80 +56,64 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
return (flags >> 3) & 7;
}
-extern void rt6_bind_peer(struct rt6_info *rt,
- int create);
-
-static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
+static inline bool rt6_need_strict(const struct in6_addr *daddr)
{
- if (rt->rt6i_peer)
- return rt->rt6i_peer;
-
- rt6_bind_peer(rt, 0);
- return rt->rt6i_peer;
+ return ipv6_addr_type(daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
-extern void ip6_route_input(struct sk_buff *skb);
+void ip6_route_input(struct sk_buff *skb);
-extern struct dst_entry * ip6_route_output(struct net *net,
- struct sock *sk,
- struct flowi *fl);
+struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+ struct flowi6 *fl6);
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ int flags);
-extern int ip6_route_init(void);
-extern void ip6_route_cleanup(void);
+int ip6_route_init(void);
+void ip6_route_cleanup(void);
-extern int ipv6_route_ioctl(struct net *net,
- unsigned int cmd,
- void __user *arg);
+int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-extern int ip6_route_add(struct fib6_config *cfg);
-extern int ip6_ins_rt(struct rt6_info *);
-extern int ip6_del_rt(struct rt6_info *);
+int ip6_route_add(struct fib6_config *cfg);
+int ip6_ins_rt(struct rt6_info *);
+int ip6_del_rt(struct rt6_info *);
-extern struct rt6_info *rt6_lookup(struct net *net,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr,
- int oif, int flags);
+int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr);
-extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
- struct neighbour *neigh,
- const struct in6_addr *addr);
-extern int icmp6_dst_gc(void);
+struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, int oif, int flags);
-extern void fib6_force_start_gc(struct net *net);
+struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
+int icmp6_dst_gc(void);
-extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
- const struct in6_addr *addr,
- int anycast);
+void fib6_force_start_gc(struct net *net);
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
+struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ const struct in6_addr *addr, bool anycast);
/*
* support functions for ND
*
*/
-extern struct rt6_info * rt6_get_dflt_router(struct in6_addr *addr,
- struct net_device *dev);
-extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr,
- struct net_device *dev,
- unsigned int pref);
-
-extern void rt6_purge_dflt_routers(struct net *net);
-
-extern int rt6_route_rcv(struct net_device *dev,
- u8 *opt, int len,
- struct in6_addr *gwaddr);
-
-extern void rt6_redirect(struct in6_addr *dest,
- struct in6_addr *src,
- struct in6_addr *saddr,
- struct neighbour *neigh,
- u8 *lladdr,
- int on_link);
-
-extern void rt6_pmtu_discovery(struct in6_addr *daddr,
- struct in6_addr *saddr,
- struct net_device *dev,
- u32 pmtu);
+struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr,
+ struct net_device *dev);
+struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+ struct net_device *dev, unsigned int pref);
+
+void rt6_purge_dflt_routers(struct net *net);
+
+int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+ const struct in6_addr *gwaddr);
+
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
+ u32 mark);
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark);
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
@@ -138,16 +123,19 @@ struct rt6_rtnl_dump_arg {
struct net *net;
};
-extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
+int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+void rt6_ifdown(struct net *net, struct net_device *dev);
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
/*
* Store a destination cache entry in a socket
*/
static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
- struct in6_addr *daddr, struct in6_addr *saddr)
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt = (struct rt6_info *) dst;
@@ -168,12 +156,45 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
spin_unlock(&sk->sk_dst_lock);
}
-static inline int ipv6_unicast_destination(struct sk_buff *skb)
+static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
{
struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
return rt->rt6i_flags & RTF_LOCAL;
}
-#endif
+static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+{
+ struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+
+ return rt->rt6i_flags & RTF_ANYCAST;
+}
+
+int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+
+static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+{
+ struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+
+ return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
+ skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
+static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
+{
+ return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+ inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+}
+
+static inline bool ip6_sk_ignore_df(const struct sock *sk)
+{
+ return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
+ inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
+}
+
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
+{
+ return &rt->rt6i_gateway;
+}
+
#endif
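
rt6_need_strict() tells a caller whether the destination only makes sense relative to a particular interface (link-local, multicast or loopback scope). A hedged sketch of a lookup wrapper built on the new ip6_route_lookup(); example_output_route() is hypothetical.

	/* Sketch: pin the lookup to fl6->flowi6_oif when the destination is
	 * scope-limited, otherwise let the FIB pick the outgoing interface.
	 */
	static struct dst_entry *example_output_route(struct net *net,
						      struct flowi6 *fl6)
	{
		int flags = 0;

		if (rt6_need_strict(&fl6->daddr))
			flags |= RT6_LOOKUP_F_IFACE;

		return ip6_route_lookup(net, fl6, flags);
	}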
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fc73e667b50..a5593dab6af 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -3,22 +3,53 @@
#include <linux/ipv6.h>
#include <linux/netdevice.h>
+#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>
+#define IP6TUNNEL_ERR_TIMEO (30*HZ)
+
/* capable of sending packets */
#define IP6_TNL_F_CAP_XMIT 0x10000
/* capable of receiving packets */
#define IP6_TNL_F_CAP_RCV 0x20000
+/* determine capability on a per-packet basis */
+#define IP6_TNL_F_CAP_PER_PACKET 0x40000
-/* IPv6 tunnel */
+struct __ip6_tnl_parm {
+ char name[IFNAMSIZ]; /* name of tunnel device */
+ int link; /* ifindex of underlying L2 interface */
+ __u8 proto; /* tunnel protocol */
+ __u8 encap_limit; /* encapsulation limit for tunnel */
+ __u8 hop_limit; /* hop limit for tunnel */
+ __be32 flowinfo; /* traffic class and flowlabel for tunnel */
+ __u32 flags; /* tunnel flags */
+ struct in6_addr laddr; /* local tunnel end-point address */
+ struct in6_addr raddr; /* remote tunnel end-point address */
+ __be16 i_flags;
+ __be16 o_flags;
+ __be32 i_key;
+ __be32 o_key;
+};
+
+/* IPv6 tunnel */
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
- struct ip6_tnl_parm parms; /* tunnel configuration parameters */
+ struct net *net; /* netns for packet i/o */
+ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
struct dst_entry *dst_cache; /* cached dst */
u32 dst_cookie;
+
+ int err_count;
+ unsigned long err_time;
+
+ /* These fields used only by GRE */
+ __u32 i_seqno; /* The last seen seqno */
+ __u32 o_seqno; /* The last output seqno */
+ int hlen; /* Precalculated GRE header length */
+ int mlink;
};
/* Tunnel encapsulation limit destination sub-option */
@@ -29,4 +60,33 @@ struct ipv6_tlv_tnl_enc_lim {
__u8 encap_limit; /* tunnel encapsulation limit */
} __packed;
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+void ip6_tnl_dst_reset(struct ip6_tnl *t);
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
+
+static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+ int pkt_len, err;
+
+ pkt_len = skb->len;
+ err = ip6_local_out(skb);
+
+ if (net_xmit_eval(err) == 0) {
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else {
+ stats->tx_errors++;
+ stats->tx_aborted_errors++;
+ }
+}
#endif
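
IP6_TNL_F_CAP_PER_PACKET marks tunnels whose ability to receive (or transmit) must be re-evaluated for every packet rather than once at configuration time, which is what ip6_tnl_rcv_ctl() and ip6_tnl_get_cap() are declared for. A hedged sketch of the receive-side check; example_can_rcv() is illustrative, the real policy lives in ip6_tunnel.c.

	/* Sketch: decide whether tunnel t may accept a packet with the given
	 * outer IPv6 header.  For a per-packet tunnel the capability depends
	 * on the addresses of this packet, not on the configured endpoints.
	 */
	static bool example_can_rcv(struct ip6_tnl *t, const struct ipv6hdr *ipv6h)
	{
		if (t->parms.flags & IP6_TNL_F_CAP_PER_PACKET)
			return ip6_tnl_get_cap(t, &ipv6h->daddr, &ipv6h->saddr) &
			       IP6_TNL_F_CAP_RCV;

		return t->parms.flags & IP6_TNL_F_CAP_RCV;
	}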
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 07bdb5e9e8a..9922093f575 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -18,7 +18,10 @@
#include <net/flow.h>
#include <linux/seq_file.h>
+#include <linux/rcupdate.h>
#include <net/fib_rules.h>
+#include <net/inetpeer.h>
+#include <linux/percpu.h>
struct fib_config {
u8 fc_dst_len;
@@ -44,22 +47,47 @@ struct fib_config {
};
struct fib_info;
+struct rtable;
+
+struct fib_nh_exception {
+ struct fib_nh_exception __rcu *fnhe_next;
+ int fnhe_genid;
+ __be32 fnhe_daddr;
+ u32 fnhe_pmtu;
+ __be32 fnhe_gw;
+ unsigned long fnhe_expires;
+ struct rtable __rcu *fnhe_rth_input;
+ struct rtable __rcu *fnhe_rth_output;
+ unsigned long fnhe_stamp;
+};
+
+struct fnhe_hash_bucket {
+ struct fib_nh_exception __rcu *chain;
+};
+
+#define FNHE_HASH_SIZE 2048
+#define FNHE_RECLAIM_DEPTH 5
struct fib_nh {
struct net_device *nh_dev;
struct hlist_node nh_hash;
struct fib_info *nh_parent;
- unsigned nh_flags;
+ unsigned int nh_flags;
unsigned char nh_scope;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int nh_weight;
int nh_power;
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 nh_tclassid;
#endif
int nh_oif;
__be32 nh_gw;
+ __be32 nh_saddr;
+ int nh_saddr_genid;
+ struct rtable __rcu * __percpu *nh_pcpu_rth_output;
+ struct rtable __rcu *nh_rth_input;
+ struct fnhe_hash_bucket *nh_exceptions;
};
/*
@@ -72,12 +100,14 @@ struct fib_info {
struct net *fib_net;
int fib_treeref;
atomic_t fib_clntref;
- int fib_dead;
- unsigned fib_flags;
- int fib_protocol;
+ unsigned int fib_flags;
+ unsigned char fib_dead;
+ unsigned char fib_protocol;
+ unsigned char fib_scope;
+ unsigned char fib_type;
__be32 fib_prefsrc;
u32 fib_priority;
- u32 fib_metrics[RTAX_MAX];
+ u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
#define fib_window fib_metrics[RTAX_WINDOW-1]
#define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,15 +126,16 @@ struct fib_info {
struct fib_rule;
#endif
+struct fib_table;
struct fib_result {
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
+ u32 tclassid;
struct fib_info *fi;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- struct fib_rule *r;
-#endif
+ struct fib_table *table;
+ struct list_head *fa_head;
};
struct fib_result_nl {
@@ -123,42 +154,47 @@ struct fib_result_nl {
};
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-
#define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
-
-#define FIB_TABLE_HASHSZ 2
-
#else /* CONFIG_IP_ROUTE_MULTIPATH */
-
#define FIB_RES_NH(res) ((res).fi->fib_nh[0])
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+#ifdef CONFIG_IP_MULTIPLE_TABLES
#define FIB_TABLE_HASHSZ 256
+#else
+#define FIB_TABLE_HASHSZ 2
+#endif
-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
-#define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res))
+#define FIB_RES_SADDR(net, res) \
+ ((FIB_RES_NH(res).nh_saddr_genid == \
+ atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
+ FIB_RES_NH(res).nh_saddr : \
+ fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev)
#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif)
+#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
+ FIB_RES_SADDR(net, res))
+
struct fib_table {
- struct hlist_node tb_hlist;
- u32 tb_id;
- int tb_default;
- unsigned char tb_data[0];
+ struct hlist_node tb_hlist;
+ u32 tb_id;
+ int tb_default;
+ int tb_num_default;
+ unsigned long tb_data[0];
};
-extern int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
- struct fib_result *res, int fib_flags);
-extern int fib_table_insert(struct fib_table *, struct fib_config *);
-extern int fib_table_delete(struct fib_table *, struct fib_config *);
-extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
- struct netlink_callback *cb);
-extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
- const struct flowi *flp,
- struct fib_result *res);
-extern void fib_free_table(struct fib_table *tb);
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+ struct fib_result *res, int fib_flags);
+int fib_table_insert(struct fib_table *, struct fib_config *);
+int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+ struct netlink_callback *cb);
+int fib_table_flush(struct fib_table *table);
+void fib_free_table(struct fib_table *tb);
@@ -182,7 +218,7 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
return fib_get_table(net, id);
}
-static inline int fib_lookup(struct net *net, const struct flowi *flp,
+static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
struct fib_result *res)
{
struct fib_table *table;
@@ -198,50 +234,78 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
-extern int __net_init fib4_rules_init(struct net *net);
-extern void __net_exit fib4_rules_exit(struct net *net);
+int __net_init fib4_rules_init(struct net *net);
+void __net_exit fib4_rules_exit(struct net *net);
-#ifdef CONFIG_NET_CLS_ROUTE
-extern u32 fib_rules_tclass(struct fib_result *res);
-#endif
+struct fib_table *fib_new_table(struct net *net, u32 id);
+struct fib_table *fib_get_table(struct net *net, u32 id);
-extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
-extern struct fib_table *fib_new_table(struct net *net, u32 id);
-extern struct fib_table *fib_get_table(struct net *net, u32 id);
+static inline int fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res)
+{
+ if (!net->ipv4.fib_has_custom_rules) {
+ res->tclassid = 0;
+ if (net->ipv4.fib_local &&
+ !fib_table_lookup(net->ipv4.fib_local, flp, res,
+ FIB_LOOKUP_NOREF))
+ return 0;
+ if (net->ipv4.fib_main &&
+ !fib_table_lookup(net->ipv4.fib_main, flp, res,
+ FIB_LOOKUP_NOREF))
+ return 0;
+ if (net->ipv4.fib_default &&
+ !fib_table_lookup(net->ipv4.fib_default, flp, res,
+ FIB_LOOKUP_NOREF))
+ return 0;
+ return -ENETUNREACH;
+ }
+ return __fib_lookup(net, flp, res);
+}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
-extern void ip_fib_init(void);
-extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
- struct net_device *dev, __be32 *spec_dst,
- u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
- struct fib_result *res);
+void ip_fib_init(void);
+__be32 fib_compute_spec_dst(struct sk_buff *skb);
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag);
+void fib_select_default(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+static inline int fib_num_tclassid_users(struct net *net)
+{
+ return net->ipv4.fib_num_tclassid_users;
+}
+#else
+static inline int fib_num_tclassid_users(struct net *net)
+{
+ return 0;
+}
+#endif
/* Exported by fib_semantics.c */
-extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
-extern int fib_sync_down_dev(struct net_device *dev, int force);
-extern int fib_sync_down_addr(struct net *net, __be32 local);
-extern int fib_sync_up(struct net_device *dev);
-extern __be32 __fib_res_prefsrc(struct fib_result *res);
-extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
-
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
-
-static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+int ip_fib_check_default(__be32 gw, struct net_device *dev);
+int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_up(struct net_device *dev);
+void fib_select_multipath(struct fib_result *res);
+
+/* Exported by fib_trie.c */
+void fib_trie_init(void);
+struct fib_table *fib_trie_table(u32 id);
+
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
u32 rtag;
#endif
*itag = FIB_RES_NH(*res).nh_tclassid<<16;
#ifdef CONFIG_IP_MULTIPLE_TABLES
- rtag = fib_rules_tclass(res);
+ rtag = res->tclassid;
if (*itag == 0)
*itag = (rtag<<16);
*itag |= (rtag>>16);
@@ -249,7 +313,7 @@ static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
#endif
}
-extern void free_fib_info(struct fib_info *fi);
+void free_fib_info(struct fib_info *fi);
static inline void fib_info_put(struct fib_info *fi)
{
@@ -258,8 +322,8 @@ static inline void fib_info_put(struct fib_info *fi)
}
#ifdef CONFIG_PROC_FS
-extern int __net_init fib_proc_init(struct net *net);
-extern void __net_exit fib_proc_exit(struct net *net);
+int __net_init fib_proc_init(struct net *net);
+void __net_exit fib_proc_exit(struct net *net);
#else
static inline int fib_proc_init(struct net *net)
{
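
With the inline fib_lookup() fast path above, a lookup that hits the local/main/default tables never touches the rules engine when no custom rules are installed. A hedged sketch of a caller resolving an IPv4 destination to an output interface; example_resolve_oif() is hypothetical and error handling is reduced to the bare minimum.

	/* Sketch: route daddr and return the nexthop's outgoing ifindex,
	 * or -ENETUNREACH if no table has a matching route.
	 */
	static int example_resolve_oif(struct net *net, __be32 daddr, u8 tos)
	{
		struct flowi4 fl4 = {
			.daddr	    = daddr,
			.flowi4_tos = tos,
		};
		struct fib_result res;

		if (fib_lookup(net, &fl4, &res))
			return -ENETUNREACH;

		return FIB_RES_OIF(res);
	}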
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
new file mode 100644
index 00000000000..a4daf9eb856
--- /dev/null
+++ b/include/net/ip_tunnels.h
@@ -0,0 +1,184 @@
+#ifndef __NET_IP_TUNNELS_H
+#define __NET_IP_TUNNELS_H 1
+
+#include <linux/if_tunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <net/dsfield.h>
+#include <net/gro_cells.h>
+#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/rtnetlink.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#endif
+
+/* Keep error state on tunnel for 30 sec */
+#define IPTUNNEL_ERR_TIMEO (30*HZ)
+
+/* 6rd prefix/relay information */
+#ifdef CONFIG_IPV6_SIT_6RD
+struct ip_tunnel_6rd_parm {
+ struct in6_addr prefix;
+ __be32 relay_prefix;
+ u16 prefixlen;
+ u16 relay_prefixlen;
+};
+#endif
+
+struct ip_tunnel_prl_entry {
+ struct ip_tunnel_prl_entry __rcu *next;
+ __be32 addr;
+ u16 flags;
+ struct rcu_head rcu_head;
+};
+
+struct ip_tunnel_dst {
+ struct dst_entry __rcu *dst;
+};
+
+struct ip_tunnel {
+ struct ip_tunnel __rcu *next;
+ struct hlist_node hash_node;
+ struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
+
+ int err_count; /* Number of arrived ICMP errors */
+ unsigned long err_time; /* Time when the last ICMP error
+ * arrived */
+
+ /* These four fields used only by GRE */
+ __u32 i_seqno; /* The last seen seqno */
+ __u32 o_seqno; /* The last output seqno */
+ int hlen; /* Precalculated header length */
+ int mlink;
+
+ struct ip_tunnel_dst __percpu *dst_cache;
+
+ struct ip_tunnel_parm parms;
+
+ /* for SIT */
+#ifdef CONFIG_IPV6_SIT_6RD
+ struct ip_tunnel_6rd_parm ip6rd;
+#endif
+ struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
+ unsigned int prl_count; /* # of entries in PRL */
+ int ip_tnl_net_id;
+ struct gro_cells gro_cells;
+};
+
+#define TUNNEL_CSUM __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING __cpu_to_be16(0x02)
+#define TUNNEL_KEY __cpu_to_be16(0x04)
+#define TUNNEL_SEQ __cpu_to_be16(0x08)
+#define TUNNEL_STRICT __cpu_to_be16(0x10)
+#define TUNNEL_REC __cpu_to_be16(0x20)
+#define TUNNEL_VERSION __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
+
+struct tnl_ptk_info {
+ __be16 flags;
+ __be16 proto;
+ __be32 key;
+ __be32 seq;
+};
+
+#define PACKET_RCVD 0
+#define PACKET_REJECT 1
+
+#define IP_TNL_HASH_BITS 7
+#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
+
+struct ip_tunnel_net {
+ struct net_device *fb_tunnel_dev;
+ struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+};
+
+#ifdef CONFIG_INET
+
+int ip_tunnel_init(struct net_device *dev);
+void ip_tunnel_uninit(struct net_device *dev);
+void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+ struct rtnl_link_ops *ops, char *devname);
+
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
+
+void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const struct iphdr *tnl_params, const u8 protocol);
+int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
+
+struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot);
+struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ int link, __be16 flags,
+ __be32 remote, __be32 local,
+ __be32 key);
+
+int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi, bool log_ecn_error);
+int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct ip_tunnel_parm *p);
+int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
+ struct ip_tunnel_parm *p);
+void ip_tunnel_setup(struct net_device *dev, int net_id);
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
+
+/* Extract dsfield from inner protocol */
+static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return iph->tos;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return ipv6_get_dsfield((const struct ipv6hdr *)iph);
+ else
+ return 0;
+}
+
+/* Propagate ECN bits out */
+static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ u8 inner = ip_tunnel_get_dsfield(iph, skb);
+
+ return INET_ECN_encapsulate(tos, inner);
+}
+
+int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
+int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 proto,
+ __u8 tos, __u8 ttl, __be16 df, bool xnet);
+
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+ int gso_type_mask);
+
+static inline void iptunnel_xmit_stats(int err,
+ struct net_device_stats *err_stats,
+ struct pcpu_sw_netstats __percpu *stats)
+{
+ if (err > 0) {
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += err;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else if (err < 0) {
+ err_stats->tx_errors++;
+ err_stats->tx_aborted_errors++;
+ } else {
+ err_stats->tx_dropped++;
+ }
+}
+
+#endif /* CONFIG_INET */
+
+#endif /* __NET_IP_TUNNELS_H */
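
iptunnel_xmit() returns the number of bytes handed to the IP layer (or a negative error), and iptunnel_xmit_stats() turns that result into tx_bytes/tx_packets, tx_errors or tx_dropped. A hedged sketch of the transmit tail a tunnel driver is expected to end with; example_tunnel_xmit_tail() and its argument plumbing are assumptions.

	/* Sketch: encapsulate and send skb over rt, then account the result.
	 * dev->tstats is assumed to be a per-cpu struct pcpu_sw_netstats.
	 */
	static void example_tunnel_xmit_tail(struct net_device *dev, struct rtable *rt,
					     struct sk_buff *skb, __be32 src, __be32 dst,
					     u8 proto, u8 tos, u8 ttl, __be16 df,
					     bool xnet)
	{
		int err;

		err = iptunnel_xmit(skb->sk, rt, skb, src, dst, proto, tos, ttl,
				    df, xnet);
		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	}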
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index b7bbd6c28cf..624a8a54806 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -8,56 +8,162 @@
#include <linux/ip_vs.h> /* definitions shared with userland */
-/* old ipvsadm versions still include this file directly */
-#ifdef __KERNEL__
-
#include <asm/types.h> /* for __uXX types */
-#include <linux/sysctl.h> /* for ctl_path */
#include <linux/list.h> /* for struct list_head */
#include <linux/spinlock.h> /* for struct rwlock_t */
-#include <asm/atomic.h> /* for struct atomic_t */
+#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/compiler.h>
#include <linux/timer.h>
+#include <linux/bug.h>
#include <net/checksum.h>
#include <linux/netfilter.h> /* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h> /* for struct ipv6hdr */
-#include <net/ipv6.h> /* for ipv6_addr_copy */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <net/ipv6.h>
+#if IS_ENABLED(CONFIG_IP_VS_IPV6)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
+#include <net/net_namespace.h>		/* Network namespace */
+
+/*
+ * Generic access of ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+ return net->ipvs;
+}
+/*
+ * Get the net pointer from an skb on the traffic path;
+ * use skb_sknet() when the call comes from userland (ioctl or netlink).
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /*
+	 * Debug only: try the most likely sources first
+	 * and end with BUG() if no net pointer can be found.
+ */
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ if (skb_dst(skb) && skb_dst(skb)->dev)
+ return dev_net(skb_dst(skb)->dev);
+ WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+ return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /* Start with the most likely hit */
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return sock_net(skb->sk);
+#endif
+#else
+ return &init_net;
+#endif
+}
+/*
+ * This one is needed for single_open_net() since net is stored directly
+ * in seq->private, not inside a struct, so seq_file_net() can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return (struct net *)seq->private;
+#else
+ return &init_net;
+#endif
+}
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
-
struct ip_vs_iphdr {
- int len;
- __u8 protocol;
+	__u32		len;	/* IPv4: where L4 starts;
+				   IPv6: where the L4 transport header starts */
+	__u16		fragoffs; /* IPv6 fragment offset, 0 if first fragment (or not a fragment) */
+ __s16 protocol;
+ __s32 flags;
union nf_inet_addr saddr;
union nf_inet_addr daddr;
};
+static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
+ int len, void *buffer,
+ const struct ip_vs_iphdr *ipvsh)
+{
+ return skb_header_pointer(skb, offset, len, buffer);
+}
+
+static inline void
+ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
+{
+ const struct iphdr *iph = nh;
+
+ iphdr->len = iph->ihl * 4;
+ iphdr->fragoffs = 0;
+ iphdr->protocol = iph->protocol;
+ iphdr->saddr.ip = iph->saddr;
+ iphdr->daddr.ip = iph->daddr;
+}
+
+/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
+ * IPv6 requires some extra work, as finding the proper header position
+ * depends on the IPv6 extension headers.
+ */
static inline void
-ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
+ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- const struct ipv6hdr *iph = nh;
- iphdr->len = sizeof(struct ipv6hdr);
- iphdr->protocol = iph->nexthdr;
- ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
- ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
+ const struct ipv6hdr *iph =
+ (struct ipv6hdr *)skb_network_header(skb);
+ iphdr->saddr.in6 = iph->saddr;
+ iphdr->daddr.in6 = iph->daddr;
+ /* ipv6_find_hdr() updates len, flags */
+ iphdr->len = 0;
+ iphdr->flags = 0;
+ iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
+ &iphdr->fragoffs,
+ &iphdr->flags);
} else
#endif
{
- const struct iphdr *iph = nh;
- iphdr->len = iph->ihl * 4;
- iphdr->protocol = iph->protocol;
- iphdr->saddr.ip = iph->saddr;
- iphdr->daddr.ip = iph->daddr;
+ const struct iphdr *iph =
+ (struct iphdr *)skb_network_header(skb);
+ iphdr->len = iph->ihl * 4;
+ iphdr->fragoffs = 0;
+ iphdr->protocol = iph->protocol;
+ iphdr->saddr.ip = iph->saddr;
+ iphdr->daddr.ip = iph->daddr;
}
}
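
ip_vs_fill_iph_skb() replaces the old nh-pointer based filler: it works from the skb so that, for IPv6, ipv6_find_hdr() can walk the extension headers and record the fragment offset. A hedged sketch of a hook-side caller; example_hook() is illustrative, the real users are the IPVS netfilter hooks.

	/* Sketch: build the unified header view and bail out early for
	 * protocols this (hypothetical) hook does not handle.
	 */
	static unsigned int example_hook(int af, struct sk_buff *skb)
	{
		struct ip_vs_iphdr iph;

		ip_vs_fill_iph_skb(af, skb, &iph);

		if (iph.protocol != IPPROTO_TCP && iph.protocol != IPPROTO_UDP)
			return NF_ACCEPT;

		/* iph.len points at the transport header; iph.fragoffs is
		 * non-zero for a non-first IPv6 fragment.
		 */
		return NF_ACCEPT;
	}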
@@ -66,12 +172,27 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
- ipv6_addr_copy(&dst->in6, &src->in6);
+ dst->in6 = src->in6;
else
#endif
dst->ip = src->ip;
}
+static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
+ const union nf_inet_addr *src)
+{
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+ dst->in6 = src->in6;
+ return;
+ }
+#endif
+ dst->ip = src->ip;
+ dst->all[1] = 0;
+ dst->all[2] = 0;
+ dst->all[3] = 0;
+}
+
static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
const union nf_inet_addr *b)
{
@@ -85,7 +206,7 @@ static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>
-extern int ip_vs_get_debug_level(void);
+int ip_vs_get_debug_level(void);
static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
const union nf_inet_addr *addr,
@@ -94,7 +215,7 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
int len;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
- len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6]",
+ len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
&addr->in6) + 1;
else
#endif
@@ -183,8 +304,6 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
#define LeaveFunction(level) do {} while (0)
#endif
-#define IP_VS_WAIT_WHILE(expr) while (expr) { cpu_relax(); }
-
/*
* The port number of FTP service (in network order).
@@ -231,17 +350,18 @@ enum {
*/
enum ip_vs_sctp_states {
IP_VS_SCTP_S_NONE,
- IP_VS_SCTP_S_INIT_CLI,
- IP_VS_SCTP_S_INIT_SER,
- IP_VS_SCTP_S_INIT_ACK_CLI,
- IP_VS_SCTP_S_INIT_ACK_SER,
- IP_VS_SCTP_S_ECHO_CLI,
- IP_VS_SCTP_S_ECHO_SER,
+ IP_VS_SCTP_S_INIT1,
+ IP_VS_SCTP_S_INIT,
+ IP_VS_SCTP_S_COOKIE_SENT,
+ IP_VS_SCTP_S_COOKIE_REPLIED,
+ IP_VS_SCTP_S_COOKIE_WAIT,
+ IP_VS_SCTP_S_COOKIE,
+ IP_VS_SCTP_S_COOKIE_ECHOED,
IP_VS_SCTP_S_ESTABLISHED,
- IP_VS_SCTP_S_SHUT_CLI,
- IP_VS_SCTP_S_SHUT_SER,
- IP_VS_SCTP_S_SHUT_ACK_CLI,
- IP_VS_SCTP_S_SHUT_ACK_SER,
+ IP_VS_SCTP_S_SHUTDOWN_SENT,
+ IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
+ IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
+ IP_VS_SCTP_S_REJECTED,
IP_VS_SCTP_S_CLOSED,
IP_VS_SCTP_S_LAST
};
@@ -258,6 +378,23 @@ struct ip_vs_seq {
before last resized pkt */
};
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+ __u32 conns; /* connections scheduled */
+ __u32 inpkts; /* incoming packets */
+ __u32 outpkts; /* outgoing packets */
+ __u64 inbytes; /* incoming bytes */
+ __u64 outbytes; /* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+ struct ip_vs_counters ustats;
+ struct u64_stats_sync syncp;
+};
/*
* IPVS statistics objects
@@ -279,10 +416,11 @@ struct ip_vs_estimator {
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_stats_user ustats; /* statistics */
struct ip_vs_estimator est; /* estimator */
-
- spinlock_t lock; /* spin lock */
+ struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */
+ spinlock_t lock; /* spin lock */
+ struct ip_vs_stats_user ustats0; /* reset values */
};
struct dst_entry;
@@ -290,6 +428,7 @@ struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
+struct ip_vs_proto_data;
struct ip_vs_protocol {
struct ip_vs_protocol *next;
@@ -297,51 +436,50 @@ struct ip_vs_protocol {
u16 protocol;
u16 num_states;
int dont_defrag;
- atomic_t appcnt; /* counter of proto app incs */
- int *timeout_table; /* protocol timeout table */
void (*init)(struct ip_vs_protocol *pp);
void (*exit)(struct ip_vs_protocol *pp);
+ int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+ void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
int (*conn_schedule)(int af, struct sk_buff *skb,
- struct ip_vs_protocol *pp,
- int *verdict, struct ip_vs_conn **cpp);
+ struct ip_vs_proto_data *pd,
+ int *verdict, struct ip_vs_conn **cpp,
+ struct ip_vs_iphdr *iph);
struct ip_vs_conn *
(*conn_in_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
- unsigned int proto_off,
int inverse);
struct ip_vs_conn *
(*conn_out_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
- unsigned int proto_off,
int inverse);
- int (*snat_handler)(struct sk_buff *skb,
- struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
+ int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
- int (*dnat_handler)(struct sk_buff *skb,
- struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
+ int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
int (*csum_check)(int af, struct sk_buff *skb,
struct ip_vs_protocol *pp);
const char *(*state_name)(int state);
- int (*state_transition)(struct ip_vs_conn *cp, int direction,
- const struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ void (*state_transition)(struct ip_vs_conn *cp, int direction,
+ const struct sk_buff *skb,
+ struct ip_vs_proto_data *pd);
- int (*register_app)(struct ip_vs_app *inc);
+ int (*register_app)(struct net *net, struct ip_vs_app *inc);
- void (*unregister_app)(struct ip_vs_app *inc);
+ void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
int (*app_conn_bind)(struct ip_vs_conn *cp);
@@ -350,14 +488,26 @@ struct ip_vs_protocol {
int offset,
const char *msg);
- void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+ void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
- int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+ struct ip_vs_proto_data *next;
+ struct ip_vs_protocol *pp;
+ int *timeout_table; /* protocol timeout table */
+ atomic_t appcnt; /* counter of proto app incs. */
+ struct tcp_states_t *tcp_state_table;
};
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+ unsigned short proto);
struct ip_vs_conn_param {
+ struct net *net;
const union nf_inet_addr *caddr;
const union nf_inet_addr *vaddr;
__be16 cport;
@@ -374,18 +524,20 @@ struct ip_vs_conn_param {
* IP_VS structure allocated for each dynamically scheduled connection
*/
struct ip_vs_conn {
- struct list_head c_list; /* hashed list heads */
-
+ struct hlist_node c_list; /* hashed list heads */
/* Protocol, addresses and port numbers */
- u16 af; /* address family */
- union nf_inet_addr caddr; /* client address */
- union nf_inet_addr vaddr; /* virtual address */
- union nf_inet_addr daddr; /* destination address */
- volatile __u32 flags; /* status flags */
- __be16 cport;
- __be16 vport;
- __be16 dport;
+ __be16 cport;
+ __be16 dport;
+ __be16 vport;
+ u16 af; /* address family */
+ union nf_inet_addr caddr; /* client address */
+ union nf_inet_addr vaddr; /* virtual address */
+ union nf_inet_addr daddr; /* destination address */
+ volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
+#ifdef CONFIG_NET_NS
+ struct net *net; /* Name space */
+#endif
/* counter and timer */
atomic_t refcnt; /* reference count */
@@ -399,6 +551,8 @@ struct ip_vs_conn {
					 * state transition triggered
* synchronization
*/
+	__u32			fwmark;		/* Firewall mark from skb */
+ unsigned long sync_endtime; /* jiffies + sent_retries */
/* Control members */
struct ip_vs_conn *control; /* Master control connection */
@@ -412,7 +566,7 @@ struct ip_vs_conn {
NF_ACCEPT can be returned when destination is local.
*/
int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp);
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
/* Note: we can group the following members into a structure,
in order to save more space, and the following members are
@@ -422,10 +576,40 @@ struct ip_vs_conn {
struct ip_vs_seq in_seq; /* incoming seq. struct */
struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ const struct ip_vs_pe *pe;
char *pe_data;
__u8 pe_data_len;
+
+ struct rcu_head rcu_head;
};
+/*
+ * Saves some memory in the conn table when network namespaces are disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net;
+#else
+ return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+ struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net == net;
+#else
+ return 1;
+#endif
+}
/*
* Extended internal versions of struct ip_vs_service_user and
@@ -440,25 +624,25 @@ struct ip_vs_service_user_kern {
u16 af;
u16 protocol;
union nf_inet_addr addr; /* virtual ip address */
- u16 port;
+ __be16 port;
	u32			fwmark;		/* firewall mark of service */
/* virtual service options */
char *sched_name;
char *pe_name;
- unsigned flags; /* virtual service flags */
- unsigned timeout; /* persistent timeout in sec */
- u32 netmask; /* persistent netmask */
+ unsigned int flags; /* virtual service flags */
+ unsigned int timeout; /* persistent timeout in sec */
+ __be32 netmask; /* persistent netmask or plen */
};
struct ip_vs_dest_user_kern {
/* destination server address */
union nf_inet_addr addr;
- u16 port;
+ __be16 port;
/* real server options */
- unsigned conn_flags; /* connection flags */
+ unsigned int conn_flags; /* connection flags */
int weight; /* destination weight */
/* thresholds for active connections */
@@ -472,34 +656,42 @@ struct ip_vs_dest_user_kern {
* and the forwarding entries
*/
struct ip_vs_service {
- struct list_head s_list; /* for normal service table */
- struct list_head f_list; /* for fwmark-based service table */
+ struct hlist_node s_list; /* for normal service table */
+ struct hlist_node f_list; /* for fwmark-based service table */
atomic_t refcnt; /* reference counter */
- atomic_t usecnt; /* use counter */
u16 af; /* address family */
__u16 protocol; /* which protocol (TCP/UDP) */
union nf_inet_addr addr; /* IP address for virtual service */
__be16 port; /* port number for the service */
__u32 fwmark; /* firewall mark of the service */
- unsigned flags; /* service status flags */
- unsigned timeout; /* persistent timeout in ticks */
- __be32 netmask; /* grouping granularity */
+ unsigned int flags; /* service status flags */
+ unsigned int timeout; /* persistent timeout in ticks */
+ __be32 netmask; /* grouping granularity, mask/plen */
+ struct net *net;
struct list_head destinations; /* real server d-linked list */
__u32 num_dests; /* number of servers */
struct ip_vs_stats stats; /* statistics for the service */
- struct ip_vs_app *inc; /* bind conns to this app inc */
/* for scheduling */
- struct ip_vs_scheduler *scheduler; /* bound scheduler object */
- rwlock_t sched_lock; /* lock sched_data */
+ struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
+ spinlock_t sched_lock; /* lock sched_data */
void *sched_data; /* scheduler application data */
/* alternate persistence engine */
- struct ip_vs_pe *pe;
+ struct ip_vs_pe __rcu *pe;
+
+ struct rcu_head rcu_head;
};
+/* Information for cached dst */
+struct ip_vs_dest_dst {
+ struct dst_entry *dst_cache; /* destination cache entry */
+ u32 dst_cookie;
+ union nf_inet_addr dst_saddr;
+ struct rcu_head rcu_head;
+};
/*
* The real server destination forwarding entry
@@ -507,17 +699,18 @@ struct ip_vs_service {
*/
struct ip_vs_dest {
struct list_head n_list; /* for the dests in the service */
- struct list_head d_list; /* for table with all the dests */
+ struct hlist_node d_list; /* for table with all the dests */
u16 af; /* address family */
- union nf_inet_addr addr; /* IP address of the server */
__be16 port; /* port number of the server */
- volatile unsigned flags; /* dest status flags */
+ union nf_inet_addr addr; /* IP address of the server */
+ volatile unsigned int flags; /* dest status flags */
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
atomic_t refcnt; /* reference counter */
struct ip_vs_stats stats; /* statistics */
+ unsigned long idle_start; /* start time, jiffies */
/* connection counters and thresholds */
atomic_t activeconns; /* active connections */
@@ -528,19 +721,17 @@ struct ip_vs_dest {
/* for destination cache */
spinlock_t dst_lock; /* lock of dst_cache */
- struct dst_entry *dst_cache; /* destination cache entry */
- u32 dst_rtos; /* RT_TOS(tos) for dst */
- u32 dst_cookie;
-#ifdef CONFIG_IP_VS_IPV6
- struct in6_addr dst_saddr;
-#endif
+ struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
/* for virtual service */
- struct ip_vs_service *svc; /* service it belongs to */
+ struct ip_vs_service __rcu *svc; /* service it belongs to */
__u16 protocol; /* which protocol (TCP/UDP) */
- union nf_inet_addr vaddr; /* virtual IP address */
__be16 vport; /* virtual port number */
+ union nf_inet_addr vaddr; /* virtual IP address */
__u32 vfwmark; /* firewall mark of service */
+
+ struct list_head t_list; /* in dest_trash */
+ unsigned int in_rs_table:1; /* we are in rs_table */
};
@@ -556,13 +747,18 @@ struct ip_vs_scheduler {
/* scheduler initializing service */
int (*init_service)(struct ip_vs_service *svc);
/* scheduling service finish */
- int (*done_service)(struct ip_vs_service *svc);
- /* scheduler updating service */
- int (*update_service)(struct ip_vs_service *svc);
+ void (*done_service)(struct ip_vs_service *svc);
+ /* dest is linked */
+ int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
+ /* dest is unlinked */
+ int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
+ /* dest is updated */
+ int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
/* selecting a server from the given service */
struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
- const struct sk_buff *skb);
+ const struct sk_buff *skb,
+ struct ip_vs_iphdr *iph);
};
/* The persistence engine object */
@@ -597,6 +793,7 @@ struct ip_vs_app {
struct ip_vs_app *app; /* its real application */
__be16 port; /* port number in net order */
atomic_t usecnt; /* usage counter */
+ struct rcu_head rcu_head;
/*
* output hook: Process packet in inout direction, diff set for TCP.
@@ -636,13 +833,11 @@ struct ip_vs_app {
struct ip_vs_conn *
(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
- const struct iphdr *iph, unsigned int proto_off,
- int inverse);
+ const struct iphdr *iph, int inverse);
struct ip_vs_conn *
(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
- const struct iphdr *iph, unsigned int proto_off,
- int inverse);
+ const struct iphdr *iph, int inverse);
int (*state_transition)(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
@@ -651,13 +846,305 @@ struct ip_vs_app {
void (*timeout_change)(struct ip_vs_app *app, int flags);
};
+struct ipvs_master_sync_state {
+ struct list_head sync_queue;
+ struct ip_vs_sync_buff *sync_buff;
+ unsigned long sync_queue_len;
+ unsigned int sync_queue_delay;
+ struct task_struct *master_thread;
+ struct delayed_work master_wakeup_work;
+ struct netns_ipvs *ipvs;
+};
+
+/* How much time to keep dests in trash */
+#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
+
+/* IPVS in network namespace */
+struct netns_ipvs {
+ int gen; /* Generation */
+ int enable; /* enable like nf_hooks do */
+ /*
+ * Hash table: for real service lookups
+ */
+ #define IP_VS_RTAB_BITS 4
+ #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+ #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+ struct hlist_head rs_table[IP_VS_RTAB_SIZE];
+ /* ip_vs_app */
+ struct list_head app_list;
+ /* ip_vs_proto */
+ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
+ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+ /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ #define TCP_APP_TAB_BITS 4
+ #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
+ #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
+ struct list_head tcp_apps[TCP_APP_TAB_SIZE];
+#endif
+ /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ #define UDP_APP_TAB_BITS 4
+ #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
+ #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
+ struct list_head udp_apps[UDP_APP_TAB_SIZE];
+#endif
+ /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ #define SCTP_APP_TAB_BITS 4
+ #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+ #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+ /* Hash table for SCTP application incarnations */
+ struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+#endif
+ /* ip_vs_conn */
+ atomic_t conn_count; /* connection counter */
+
+ /* ip_vs_ctl */
+ struct ip_vs_stats tot_stats; /* Statistics & est. */
+
+ int num_services; /* no of virtual services */
+
+ /* Trash for destinations */
+ struct list_head dest_trash;
+ spinlock_t dest_trash_lock;
+ struct timer_list dest_trash_timer; /* expiration timer */
+ /* Service counters */
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+
+#ifdef CONFIG_SYSCTL
+ /* 1/rate drop and drop-entry variables */
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+ spinlock_t droppacket_lock; /* drop packet handling */
+ spinlock_t securetcp_lock; /* state and timeout tables */
+
+ /* sys-ctl struct */
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+#endif
+
+ /* sysctl variables */
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+ int sysctl_conntrack;
+#endif
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_sync_ports;
+ int sysctl_sync_persist_mode;
+ unsigned long sysctl_sync_qlen_max;
+ int sysctl_sync_sock_size;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_sloppy_tcp;
+ int sysctl_sloppy_sctp;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ unsigned int sysctl_sync_refresh_period;
+ int sysctl_sync_retries;
+ int sysctl_nat_icmp_send;
+ int sysctl_pmtu_disc;
+ int sysctl_backup_only;
+
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+ struct timer_list est_timer; /* Estimation timer */
+ /* ip_vs_sync */
+ spinlock_t sync_lock;
+ struct ipvs_master_sync_state *ms;
+ spinlock_t sync_buff_lock;
+ struct task_struct **backup_threads;
+ int threads_mask;
+ int send_mesg_maxlen;
+ int recv_mesg_maxlen;
+ volatile int sync_state;
+ volatile int master_syncid;
+ volatile int backup_syncid;
+ struct mutex sync_mutex;
+ /* multicast interface name */
+ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ /* net name space ptr */
+ struct net *net; /* Needed by timer routines */
+};
+
+#define DEFAULT_SYNC_THRESHOLD 3
+#define DEFAULT_SYNC_PERIOD 50
+#define DEFAULT_SYNC_VER 1
+#define DEFAULT_SLOPPY_TCP 0
+#define DEFAULT_SLOPPY_SCTP 0
+#define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ)
+#define DEFAULT_SYNC_RETRIES 0
+#define IPVS_SYNC_WAKEUP_RATE 8
+#define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4)
+#define IPVS_SYNC_SEND_DELAY (HZ / 50)
+#define IPVS_SYNC_CHECK_PERIOD HZ
+#define IPVS_SYNC_FLUSH_TIME (HZ * 2)
+#define IPVS_SYNC_PORTS_MAX (1 << 6)
+
+#ifdef CONFIG_SYSCTL
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_threshold[0];
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+ return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
+}
+
+static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
+{
+ return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
+}
+
+static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_retries;
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_ver;
+}
+
+static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sloppy_tcp;
+}
+
+static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sloppy_sctp;
+}
+
+static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
+{
+ return ACCESS_ONCE(ipvs->sysctl_sync_ports);
+}
+
+static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_persist_mode;
+}
+
+static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_qlen_max;
+}
+
+static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_sock_size;
+}
+
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_pmtu_disc;
+}
+
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+ return ipvs->sync_state & IP_VS_STATE_BACKUP &&
+ ipvs->sysctl_backup_only;
+}
+
+#else
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_THRESHOLD;
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_PERIOD;
+}
+
+static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_REFRESH_PERIOD;
+}
+
+static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_RETRIES & 3;
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_VER;
+}
+
+static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SLOPPY_TCP;
+}
+
+static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SLOPPY_SCTP;
+}
+
+static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+{
+ return IPVS_SYNC_QLEN_MAX;
+}
+
+static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
+#endif
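
The paired accessors above let core code read the sync tuning knobs without caring whether CONFIG_SYSCTL is compiled in; the !SYSCTL stubs simply return the DEFAULT_* values. A minimal sketch of a caller follows; it is illustrative only, not the in-tree sync decision, and the function name and the 'pkts' counter are invented here:

static bool example_should_sync(struct netns_ipvs *ipvs, int pkts)
{
	/* Illustrative only: sync on the threshold-th packet of each period */
	int threshold = sysctl_sync_threshold(ipvs);
	int period = sysctl_sync_period(ipvs);

	if (period <= 0)
		return pkts == threshold;
	return (pkts % period) == threshold;
}
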
/*
* IPVS core functions
* (from ip_vs_core.c)
*/
-extern const char *ip_vs_proto_name(unsigned proto);
-extern void ip_vs_init_hash_table(struct list_head *table, int rows);
+const char *ip_vs_proto_name(unsigned int proto);
+void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
#define IP_VS_APP_TYPE_FTP 1
@@ -674,13 +1161,14 @@ enum {
IP_VS_DIR_LAST,
};
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
const union nf_inet_addr *caddr,
__be16 cport,
const union nf_inet_addr *vaddr,
__be16 vport,
struct ip_vs_conn_param *p)
{
+ p->net = net;
p->af = af;
p->protocol = protocol;
p->caddr = caddr;
@@ -695,40 +1183,46 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
- unsigned int proto_off,
int inverse);
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
- unsigned int proto_off,
int inverse);
+/* Get reference to gain full access to conn.
+ * By default, RCU read-side critical sections have access only to
+ * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
+ */
+static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
+{
+ return atomic_inc_not_zero(&cp->refcnt);
+}
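
The helper above only succeeds while the conn still has a non-zero refcount, so it is meant to be called from within the RCU read-side section that found the conn. A hedged sketch of the pattern (the function name is invented for the example):

static struct ip_vs_conn *example_get_conn(struct ip_vs_conn *found)
{
	/* 'found' was obtained under rcu_read_lock(); take a full reference
	 * before leaving the read-side section, or give up if it is dying.
	 */
	if (!found || !__ip_vs_conn_get(found))
		return NULL;
	return found;		/* caller releases with __ip_vs_conn_put() */
}
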
+
/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
+ smp_mb__before_atomic();
atomic_dec(&cp->refcnt);
}
-extern void ip_vs_conn_put(struct ip_vs_conn *cp);
-extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+void ip_vs_conn_put(struct ip_vs_conn *cp);
+void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr,
- __be16 dport, unsigned flags,
- struct ip_vs_dest *dest);
-extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+ __be16 dport, unsigned int flags,
+ struct ip_vs_dest *dest, __u32 fwmark);
+void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
-extern const char * ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(__u16 proto, int state);
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
-extern int ip_vs_conn_init(void);
-extern void ip_vs_conn_cleanup(void);
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+int ip_vs_check_template(struct ip_vs_conn *ct);
+void ip_vs_random_dropentry(struct net *net);
+int ip_vs_conn_init(void);
+void ip_vs_conn_cleanup(void);
static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
@@ -790,47 +1284,68 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
+/*
+ * IPVS netns init & cleanup functions
+ */
+int ip_vs_estimator_net_init(struct net *net);
+int ip_vs_control_net_init(struct net *net);
+int ip_vs_protocol_net_init(struct net *net);
+int ip_vs_app_net_init(struct net *net);
+int ip_vs_conn_net_init(struct net *net);
+int ip_vs_sync_net_init(struct net *net);
+void ip_vs_conn_net_cleanup(struct net *net);
+void ip_vs_app_net_cleanup(struct net *net);
+void ip_vs_protocol_net_cleanup(struct net *net);
+void ip_vs_control_net_cleanup(struct net *net);
+void ip_vs_estimator_net_cleanup(struct net *net);
+void ip_vs_sync_net_cleanup(struct net *net);
+void ip_vs_service_net_cleanup(struct net *net);
/*
* IPVS application functions
* (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
-extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
-extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
-extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
-
-extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_init(void);
-extern void ip_vs_app_cleanup(void);
-
-void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
-void ip_vs_unbind_pe(struct ip_vs_service *svc);
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+void ip_vs_unbind_app(struct ip_vs_conn *cp);
+int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port);
+int ip_vs_app_inc_get(struct ip_vs_app *inc);
+void ip_vs_app_inc_put(struct ip_vs_app *inc);
+
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
+
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+/*
+ * Use a #define to avoid all of module.h just for these trivial ops
+ */
+#define ip_vs_pe_get(pe) \
+ if (pe && pe->module) \
+ __module_get(pe->module);
+
+#define ip_vs_pe_put(pe) \
+ if (pe && pe->module) \
+ module_put(pe->module);
/*
* IPVS protocol functions (from ip_vs_proto.c)
*/
-extern int ip_vs_protocol_init(void);
-extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
-extern int *ip_vs_create_timeout_table(int *table, int size);
-extern int
-ip_vs_set_state_timeout(int *table, int num, const char *const *names,
- const char *name, int to);
-extern void
-ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
- const struct sk_buff *skb,
- int offset, const char *msg);
+int ip_vs_protocol_init(void);
+void ip_vs_protocol_cleanup(void);
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
+int *ip_vs_create_timeout_table(int *table, int size);
+int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
+ const char *name, int to);
+void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
+ const struct sk_buff *skb, int offset,
+ const char *msg);
extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
@@ -843,125 +1358,134 @@ extern struct ip_vs_protocol ip_vs_protocol_sctp;
* Registering/unregistering scheduler functions
* (from ip_vs_sched.c)
*/
-extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
- struct ip_vs_scheduler *scheduler);
-extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc);
-extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
-extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
-extern struct ip_vs_conn *
+int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *scheduler);
+void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *sched);
+struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored);
-extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd, int *ignored,
+ struct ip_vs_iphdr *iph);
+int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+ struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
+
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
/*
* IPVS control data and functions (from ip_vs_ctl.c)
*/
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
extern struct ip_vs_stats ip_vs_stats;
-extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
-extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+struct ip_vs_service *
+ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport);
-static inline void ip_vs_service_put(struct ip_vs_service *svc)
+bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+ const union nf_inet_addr *daddr, __be16 dport);
+
+int ip_vs_use_count_inc(void);
+void ip_vs_use_count_dec(void);
+int ip_vs_register_nl_ioctl(void);
+void ip_vs_unregister_nl_ioctl(void);
+int ip_vs_control_init(void);
+void ip_vs_control_cleanup(void);
+struct ip_vs_dest *
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+ __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ __u16 protocol, __u32 fwmark, __u32 flags);
+void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+
+static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
- atomic_dec(&svc->usecnt);
+ atomic_inc(&dest->refcnt);
}
-extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
- const union nf_inet_addr *daddr, __be16 dport);
-
-extern int ip_vs_use_count_inc(void);
-extern void ip_vs_use_count_dec(void);
-extern int ip_vs_control_init(void);
-extern void ip_vs_control_cleanup(void);
-extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
- const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
-extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
+{
+ smp_mb__before_atomic();
+ atomic_dec(&dest->refcnt);
+}
+static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
+{
+ if (atomic_dec_return(&dest->refcnt) < 0)
+ kfree(dest);
+}
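
A hedged sketch of how these helpers pair up (the cache-entry type and function names are invented): a long-lived reference pins the dest with ip_vs_dest_hold() and drops it with ip_vs_dest_put_and_free(), which also frees a dest that was already unlinked once the count falls below zero, while transient users pair hold() with the plain put().

struct example_dest_cache {
	struct ip_vs_dest *dest;	/* pinned real server */
};

static void example_cache_set(struct example_dest_cache *e,
			      struct ip_vs_dest *dest)
{
	ip_vs_dest_hold(dest);		/* keep dest alive while cached */
	e->dest = dest;
}

static void example_cache_clear(struct example_dest_cache *e)
{
	ip_vs_dest_put_and_free(e->dest);	/* may free an unlinked dest */
	e->dest = NULL;
}
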
/*
* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
-
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int stop_sync_thread(struct net *net, int state);
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
/*
* IPVS rate estimator prototypes (from ip_vs_est.c)
*/
-extern int ip_vs_estimator_init(void);
-extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+ struct ip_vs_stats *stats);
/*
* Various IPVS packet transmitters (from ip_vs_xmit.c)
*/
-extern int ip_vs_null_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_bypass_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_nat_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_tunnel_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_dr_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_icmp_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
-extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
+int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, int offset,
+ unsigned int hooknum, struct ip_vs_iphdr *iph);
+void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
#ifdef CONFIG_IP_VS_IPV6
-extern int ip_vs_bypass_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_nat_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_tunnel_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_dr_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern int ip_vs_icmp_xmit_v6
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
- int offset);
+int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, int offset,
+ unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif
+#ifdef CONFIG_SYSCTL
/*
* This is a simple mechanism to ignore packets when
* we are loaded. Just set ip_vs_drop_rate to 'n' and
* we start to drop 1/rate of the packets
*/
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
- if (!ip_vs_drop_rate) return 0;
- if (--ip_vs_drop_counter > 0) return 0;
- ip_vs_drop_counter = ip_vs_drop_rate;
+ if (!ipvs->drop_rate)
+ return 0;
+ if (--ipvs->drop_counter > 0)
+ return 0;
+ ipvs->drop_counter = ipvs->drop_rate;
return 1;
}
+#else
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
+#endif
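
Callers only test the return value; with CONFIG_SYSCTL disabled the stub above makes the whole check vanish. A hedged sketch of a caller (the hook name and return-value handling are invented, this is not the in-tree hook):

static unsigned int example_in_hook(struct netns_ipvs *ipvs, struct sk_buff *skb)
{
	if (ip_vs_todrop(ipvs))
		return NF_DROP;		/* every drop_rate-th packet rejected */
	return NF_ACCEPT;
}
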
/*
* ip_vs_fwd_tag returns the forwarding tag of the connection
@@ -989,15 +1513,15 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
return fwd;
}
-extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
- struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
#ifdef CONFIG_IP_VS_IPV6
-extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
- struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
#endif
-extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
@@ -1031,10 +1555,10 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
enum ip_conntrack_info ctinfo;
- struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (!ct || !nf_ct_is_untracked(ct)) {
- nf_reset(skb);
+ nf_conntrack_put(skb->nfct);
skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
@@ -1047,22 +1571,26 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
* Netfilter connection tracking
* (from ip_vs_nfct.c)
*/
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
- return sysctl_ip_vs_conntrack;
+#ifdef CONFIG_SYSCTL
+ return ipvs->sysctl_conntrack;
+#else
+ return 0;
+#endif
}
-extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
- int outin);
-extern int ip_vs_confirm_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp);
-extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
- struct ip_vs_conn *cp, u_int8_t proto,
- const __be16 port, int from_rs);
-extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
+void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+ int outin);
+int ip_vs_confirm_conntrack(struct sk_buff *skb);
+void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+ struct ip_vs_conn *cp, u_int8_t proto,
+ const __be16 port, int from_rs);
+void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
#else
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
return 0;
}
@@ -1072,8 +1600,7 @@ static inline void ip_vs_update_conntrack(struct sk_buff *skb,
{
}
-static inline int ip_vs_confirm_conntrack(struct sk_buff *skb,
- struct ip_vs_conn *cp)
+static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
return NF_ACCEPT;
}
@@ -1084,6 +1611,18 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
/* CONFIG_IP_VS_NFCT */
#endif
-#endif /* __KERNEL__ */
+static inline int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+ /*
+	 * We assume that, on average, the overhead of handling an active
+	 * connection is 256 times that of an inactive one (this factor is a
+	 * rough estimate and may be tuned later). The overhead is therefore
+	 * estimated with the following formula:
+ * dest->activeconns*256 + dest->inactconns
+ */
+ return (atomic_read(&dest->activeconns) << 8) +
+ atomic_read(&dest->inactconns);
+}
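
For a concrete sense of the weighting: a destination with 10 active and 500 inactive connections scores 10*256 + 500 = 3060, while one with 12 active connections and none inactive scores 3072, so a handful of extra active connections outweighs hundreds of inactive ones.
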
#endif /* _NET_IP_VS_H */
diff --git a/include/net/ipip.h b/include/net/ipip.h
deleted file mode 100644
index a32654d5273..00000000000
--- a/include/net/ipip.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __NET_IPIP_H
-#define __NET_IPIP_H 1
-
-#include <linux/if_tunnel.h>
-#include <net/ip.h>
-
-/* Keep error state on tunnel for 30 sec */
-#define IPTUNNEL_ERR_TIMEO (30*HZ)
-
-/* 6rd prefix/relay information */
-struct ip_tunnel_6rd_parm {
- struct in6_addr prefix;
- __be32 relay_prefix;
- u16 prefixlen;
- u16 relay_prefixlen;
-};
-
-struct ip_tunnel {
- struct ip_tunnel __rcu *next;
- struct net_device *dev;
-
- int err_count; /* Number of arrived ICMP errors */
- unsigned long err_time; /* Time when the last ICMP error arrived */
-
- /* These four fields used only by GRE */
- __u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
- int hlen; /* Precalculated GRE header length */
- int mlink;
-
- struct ip_tunnel_parm parms;
-
- /* for SIT */
-#ifdef CONFIG_IPV6_SIT_6RD
- struct ip_tunnel_6rd_parm ip6rd;
-#endif
- struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
- unsigned int prl_count; /* # of entries in PRL */
-};
-
-struct ip_tunnel_prl_entry {
- struct ip_tunnel_prl_entry __rcu *next;
- __be32 addr;
- u16 flags;
- struct rcu_head rcu_head;
-};
-
-#define __IPTUNNEL_XMIT(stats1, stats2) do { \
- int err; \
- int pkt_len = skb->len - skb_transport_offset(skb); \
- \
- skb->ip_summed = CHECKSUM_NONE; \
- ip_select_ident(iph, &rt->dst, NULL); \
- \
- err = ip_local_out(skb); \
- if (likely(net_xmit_eval(err) == 0)) { \
- (stats1)->tx_bytes += pkt_len; \
- (stats1)->tx_packets++; \
- } else { \
- (stats2)->tx_errors++; \
- (stats2)->tx_aborted_errors++; \
- } \
-} while (0)
-
-#define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats)
-
-#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4a3cd2cd2f5..574337fe72d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -15,6 +15,7 @@
#include <linux/ipv6.h>
#include <linux/hardirq.h>
+#include <linux/jhash.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
@@ -34,11 +35,13 @@
#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
#define NEXTHDR_ROUTING 43 /* Routing header. */
#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */
+#define NEXTHDR_GRE 47 /* GRE header. */
#define NEXTHDR_ESP 50 /* Encapsulating security payload. */
#define NEXTHDR_AUTH 51 /* Authentication header. */
#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
#define NEXTHDR_NONE 59 /* No next header */
#define NEXTHDR_DEST 60 /* Destination options header. */
+#define NEXTHDR_SCTP 132 /* SCTP message. */
#define NEXTHDR_MOBILITY 135 /* Mobility header. */
#define NEXTHDR_MAX 255
@@ -77,11 +80,9 @@
/*
* Addr scopes
*/
-#ifdef __KERNEL__
#define IPV6_ADDR_MC_SCOPE(a) \
((a)->s6_addr[1] & 0x0f) /* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID -1
-#endif
#define IPV6_ADDR_SCOPE_NODELOCAL 0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02
#define IPV6_ADDR_SCOPE_SITELOCAL 0x05
@@ -89,6 +90,16 @@
#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
/*
+ * Addr flags
+ */
+#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
+ ((a)->s6_addr[1] & 0x10)
+#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
+ ((a)->s6_addr[1] & 0x20)
+#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
+ ((a)->s6_addr[1] & 0x40)
+
+/*
* fragmentation header
*/
@@ -99,15 +110,16 @@ struct frag_hdr {
__be32 identification;
};
-#define IP6_MF 0x0001
+#define IP6_MF 0x0001
+#define IP6_OFFSET 0xFFF8
-#ifdef __KERNEL__
+#define IP6_REPLY_MARK(net, mark) \
+ ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
#include <net/sock.h>
/* sysctls */
extern int sysctl_mld_max_msf;
-extern struct ctl_path net_ipv6_ctl_path[];
#define _DEVINC(net, statname, modifier, idev, field) \
({ \
@@ -117,6 +129,24 @@ extern struct ctl_path net_ipv6_ctl_path[];
SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
})
+/* per device counters are atomic_long_t */
+#define _DEVINCATOMIC(net, statname, modifier, idev, field) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+ SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+})
+
+/* per device and per net counters are atomic_long_t */
+#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+ SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
+})
+
#define _DEVADD(net, statname, modifier, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
@@ -148,16 +178,16 @@ extern struct ctl_path net_ipv6_ctl_path[];
#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \
_DEVUPD(net, ipv6, 64_BH, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
- _DEVINC(net, icmpv6, , idev, field)
+ _DEVINCATOMIC(net, icmpv6, , idev, field)
#define ICMP6_INC_STATS_BH(net, idev, field) \
- _DEVINC(net, icmpv6, _BH, idev, field)
+ _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
- _DEVINC(net, icmpv6msg, , idev, field +256)
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
- _DEVINC(net, icmpv6msg, _BH, idev, field +256)
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
- _DEVINC(net, icmpv6msg, _BH, idev, field)
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
struct ip6_ra_chain {
struct ip6_ra_chain *next;
@@ -192,14 +222,18 @@ struct ipv6_txoptions {
};
struct ip6_flowlabel {
- struct ip6_flowlabel *next;
+ struct ip6_flowlabel __rcu *next;
__be32 label;
atomic_t users;
struct in6_addr dst;
struct ipv6_txoptions *opt;
unsigned long linger;
+ struct rcu_head rcu;
u8 share;
- u32 owner;
+ union {
+ struct pid *pid;
+ kuid_t uid;
+ } owner;
unsigned long lastuse;
unsigned long expires;
struct net *fl_net;
@@ -207,20 +241,25 @@ struct ip6_flowlabel {
#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
struct ipv6_fl_socklist {
- struct ipv6_fl_socklist *next;
- struct ip6_flowlabel *fl;
+ struct ipv6_fl_socklist __rcu *next;
+ struct ip6_flowlabel *fl;
+ struct rcu_head rcu;
};
-extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
-extern struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
- struct ip6_flowlabel * fl,
- struct ipv6_txoptions * fopt);
-extern void fl6_free_socklist(struct sock *sk);
-extern int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
-extern int ip6_flowlabel_init(void);
-extern void ip6_flowlabel_cleanup(void);
+struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ struct ip6_flowlabel *fl,
+ struct ipv6_txoptions *fopt);
+void fl6_free_socklist(struct sock *sk);
+int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
+ int flags);
+int ip6_flowlabel_init(void);
+void ip6_flowlabel_cleanup(void);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
@@ -228,28 +267,53 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
atomic_dec(&fl->users);
}
-extern int ip6_ra_control(struct sock *sk, int sel);
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ struct icmp6hdr *thdr, int len);
-extern int ipv6_parse_hopopts(struct sk_buff *skb);
+int ip6_ra_control(struct sock *sk, int sel);
-extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
-extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
- int newtype,
- struct ipv6_opt_hdr __user *newopt,
- int newoptlen);
+int ipv6_parse_hopopts(struct sk_buff *skb);
+
+struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+ struct ipv6_txoptions *opt);
+struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+ struct ipv6_txoptions *opt,
+ int newtype,
+ struct ipv6_opt_hdr __user *newopt,
+ int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
-extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+
+static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+{
+	/* If forwarding is enabled, RAs are not accepted unless the special
+ * hybrid mode (accept_ra=2) is enabled.
+ */
+ return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
+ idev->cnf.accept_ra;
+}
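
Put differently, a box with forwarding enabled ignores Router Advertisements unless accept_ra is set to 2 (the hybrid host/router mode), whereas a plain host honours any non-zero accept_ra value.
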
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ip6_frag_nqueues(struct net *net)
+{
+ return net->ipv6.frags.nqueues;
+}
-int ip6_frag_nqueues(struct net *net);
-int ip6_frag_mem(struct net *net);
+static inline int ip6_frag_mem(struct net *net)
+{
+ return sum_frag_mem_limit(&net->ipv6.frags);
+}
+#endif
-#define IPV6_FRAG_HIGH_THRESH (256 * 1024) /* 262144 */
-#define IPV6_FRAG_LOW_THRESH (192 * 1024) /* 196608 */
+#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
+#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
-extern int __ipv6_addr_type(const struct in6_addr *addr);
+int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
return __ipv6_addr_type(addr) & 0xffff;
@@ -270,24 +334,40 @@ static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}
+static inline bool __ipv6_addr_needs_scope_id(int type)
+{
+ return type & IPV6_ADDR_LINKLOCAL ||
+ (type & IPV6_ADDR_MULTICAST &&
+ (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
+}
+
+static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
+{
+ return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
+}
+
static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
return memcmp(a1, a2, sizeof(struct in6_addr));
}
-static inline int
+static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
const struct in6_addr *a2)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ulm = (const unsigned long *)m;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
+ ((ul1[1] ^ ul2[1]) & ulm[1]));
+#else
return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
-}
-
-static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
-{
- memcpy(a1, a2, sizeof(struct in6_addr));
+#endif
}
static inline void ipv6_addr_prefix(struct in6_addr *pfx,
@@ -304,50 +384,96 @@ static inline void ipv6_addr_prefix(struct in6_addr *pfx,
pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}
+static inline void __ipv6_addr_set_half(__be32 *addr,
+ __be32 wh, __be32 wl)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#if defined(__BIG_ENDIAN)
+ if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
+ *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
+ return;
+ }
+#elif defined(__LITTLE_ENDIAN)
+ if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
+ *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
+ return;
+ }
+#endif
+#endif
+ addr[0] = wh;
+ addr[1] = wl;
+}
+
static inline void ipv6_addr_set(struct in6_addr *addr,
__be32 w1, __be32 w2,
__be32 w3, __be32 w4)
{
- addr->s6_addr32[0] = w1;
- addr->s6_addr32[1] = w2;
- addr->s6_addr32[2] = w3;
- addr->s6_addr32[3] = w4;
+ __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
+ __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}
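
As a small illustration of the helper (the function name is invented, the addresses are the usual well-known forms), building ::1 and an IPv4-mapped address looks like:

static void example_set_addrs(struct in6_addr *loop, struct in6_addr *mapped)
{
	ipv6_addr_set(loop, 0, 0, 0, htonl(1));			/* ::1 */
	ipv6_addr_set(mapped, 0, 0, htonl(0x0000ffff),
		      htonl(0xc0000201));			/* ::ffff:192.0.2.1 */
}
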
-static inline int ipv6_addr_equal(const struct in6_addr *a1,
- const struct in6_addr *a2)
+static inline bool ipv6_addr_equal(const struct in6_addr *a1,
+ const struct in6_addr *a2)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
+#endif
+}
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
+ const __be64 *a2,
+ unsigned int len)
+{
+ if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
+ return false;
+ return true;
}
-static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
- unsigned int prefixlen)
+static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
+ const struct in6_addr *addr2,
+ unsigned int prefixlen)
+{
+ const __be64 *a1 = (const __be64 *)addr1;
+ const __be64 *a2 = (const __be64 *)addr2;
+
+ if (prefixlen >= 64) {
+ if (a1[0] ^ a2[0])
+ return false;
+ return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
+ }
+ return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
+}
+#else
+static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
+ const struct in6_addr *addr2,
+ unsigned int prefixlen)
{
- unsigned pdw, pbi;
+ const __be32 *a1 = addr1->s6_addr32;
+ const __be32 *a2 = addr2->s6_addr32;
+ unsigned int pdw, pbi;
/* check complete u32 in prefix */
pdw = prefixlen >> 5;
if (pdw && memcmp(a1, a2, pdw << 2))
- return 0;
+ return false;
/* check incomplete u32 in prefix */
pbi = prefixlen & 0x1f;
if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
- return 0;
-
- return 1;
-}
+ return false;
-static inline int ipv6_prefix_equal(const struct in6_addr *a1,
- const struct in6_addr *a2,
- unsigned int prefixlen)
-{
- return __ipv6_prefix_equal(a1->s6_addr32, a2->s6_addr32,
- prefixlen);
+ return true;
}
+#endif
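
A short usage sketch of ipv6_prefix_equal() (the function name and prefix length are invented for the example): checking whether a destination address falls inside a configured /48 is just:

static bool example_in_site(const struct in6_addr *daddr,
			    const struct in6_addr *site_prefix)
{
	return ipv6_prefix_equal(daddr, site_prefix, 48);	/* top 48 bits */
}
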
struct inet_frag_queue;
@@ -364,40 +490,107 @@ enum ip6_defrag_users {
struct ip6_create_arg {
__be32 id;
u32 user;
- struct in6_addr *src;
- struct in6_addr *dst;
+ const struct in6_addr *src;
+ const struct in6_addr *dst;
+ u8 ecn;
};
void ip6_frag_init(struct inet_frag_queue *q, void *a);
-int ip6_frag_match(struct inet_frag_queue *q, void *a);
+bool ip6_frag_match(struct inet_frag_queue *q, void *a);
-static inline int ipv6_addr_any(const struct in6_addr *a)
+/*
+ * Equivalent of the ipv4 fragment queue (struct ipq)
+ */
+struct frag_queue {
+ struct inet_frag_queue q;
+
+ __be32 id; /* fragment id */
+ u32 user;
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+
+ int iif;
+ unsigned int csum;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+ struct inet_frags *frags);
+
+static inline bool ipv6_addr_any(const struct in6_addr *a)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+
+ return (ul[0] | ul[1]) == 0UL;
+#else
return (a->s6_addr32[0] | a->s6_addr32[1] |
a->s6_addr32[2] | a->s6_addr32[3]) == 0;
+#endif
+}
+
+static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+ unsigned long x = ul[0] ^ ul[1];
+
+ return (u32)(x ^ (x >> 32));
+#else
+ return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
+ a->s6_addr32[2] ^ a->s6_addr32[3]);
+#endif
+}
+
+/* a more secure version of ipv6_addr_hash() */
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
+{
+ u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
+
+ return jhash_3words(v,
+ (__force u32)a->s6_addr32[2],
+ (__force u32)a->s6_addr32[3],
+ initval);
}
-static inline int ipv6_addr_loopback(const struct in6_addr *a)
+static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+
+ return (ul[0] | (ul[1] ^ cpu_to_be64(1))) == 0UL;
+#else
return (a->s6_addr32[0] | a->s6_addr32[1] |
a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
+#endif
}
-static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
- return (a->s6_addr32[0] | a->s6_addr32[1] |
- (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0;
+ return (
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ *(__be64 *)a |
+#else
+ (a->s6_addr32[0] | a->s6_addr32[1]) |
+#endif
+ (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
}
/*
* Check for a RFC 4843 ORCHID address
* (Overlay Routable Cryptographic Hash Identifiers)
*/
-static inline int ipv6_addr_orchid(const struct in6_addr *a)
+static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}
+static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
+{
+ return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
+}
+
static inline void ipv6_addr_set_v4mapped(const __be32 addr,
struct in6_addr *v4mapped)
{
@@ -411,7 +604,7 @@ static inline void ipv6_addr_set_v4mapped(const __be32 addr,
* find the first different bit between two addresses
* length of address must be a multiple of 32bits
*/
-static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
+static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
const __be32 *a1 = token1, *a2 = token2;
int i;
@@ -443,23 +636,77 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a
return addrlen << 5;
}
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
+{
+ const __be64 *a1 = token1, *a2 = token2;
+ int i;
+
+ addrlen >>= 3;
+
+ for (i = 0; i < addrlen; i++) {
+ __be64 xb = a1[i] ^ a2[i];
+ if (xb)
+ return i * 64 + 63 - __fls(be64_to_cpu(xb));
+ }
+
+ return addrlen << 6;
+}
+#endif
+
+static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ if (__builtin_constant_p(addrlen) && !(addrlen & 7))
+ return __ipv6_addr_diff64(token1, token2, addrlen);
+#endif
+ return __ipv6_addr_diff32(token1, token2, addrlen);
+}
+
static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
-static __inline__ void ipv6_select_ident(struct frag_hdr *fhdr)
+int ip6_dst_hoplimit(struct dst_entry *dst);
+
+static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
+ struct dst_entry *dst)
{
- static u32 ipv6_fragmentation_id = 1;
- static DEFINE_SPINLOCK(ip6_id_lock);
+ int hlimit;
+
+ if (ipv6_addr_is_multicast(&fl6->daddr))
+ hlimit = np->mcast_hops;
+ else
+ hlimit = np->hop_limit;
+ if (hlimit < 0)
+ hlimit = ip6_dst_hoplimit(dst);
+ return hlimit;
+}
- spin_lock_bh(&ip6_id_lock);
- fhdr->identification = htonl(ipv6_fragmentation_id);
- if (++ipv6_fragmentation_id == 0)
- ipv6_fragmentation_id = 1;
- spin_unlock_bh(&ip6_id_lock);
+/*
+ * Header manipulation
+ */
+static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
+ __be32 flowlabel)
+{
+ *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}
+static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
+{
+ return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
+}
+
+static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
+{
+ return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
+}
+
+static inline u8 ip6_tclass(__be32 flowinfo)
+{
+ return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
+}
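
The helpers above pack and unpack the first 32-bit word of the IPv6 header: 4 bits of version, 8 bits of traffic class and a 20-bit flow label. A hedged sketch of building and re-reading that word (the values are arbitrary examples):

static void example_build_v6_word(void)
{
	struct ipv6hdr hdr;
	__be32 fl = htonl(0x12345) & IPV6_FLOWLABEL_MASK;	/* 20-bit label */

	ip6_flow_hdr(&hdr, 0x28, fl);	/* version 6, traffic class 0x28, label fl */
	/* ip6_tclass(ip6_flowinfo(&hdr)) is 0x28, ip6_flowlabel(&hdr) is fl */
}
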
/*
* Prototypes exported by ipv6
*/
@@ -468,134 +715,113 @@ static __inline__ void ipv6_select_ident(struct frag_hdr *fhdr)
* rcv function (called from netdevice level)
*/
-extern int ipv6_rcv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev);
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
-extern int ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sk_buff *skb);
/*
* upper-layer output functions
*/
-extern int ip6_xmit(struct sock *sk,
- struct sk_buff *skb,
- struct flowi *fl,
- struct ipv6_txoptions *opt);
-
-extern int ip6_nd_hdr(struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- int proto, int len);
-
-extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
-
-extern int ip6_append_data(struct sock *sk,
- int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
- void *from,
- int length,
- int transhdrlen,
- int hlimit,
- int tclass,
- struct ipv6_txoptions *opt,
- struct flowi *fl,
- struct rt6_info *rt,
- unsigned int flags,
- int dontfrag);
-
-extern int ip6_push_pending_frames(struct sock *sk);
-
-extern void ip6_flush_pending_frames(struct sock *sk);
-
-extern int ip6_dst_lookup(struct sock *sk,
- struct dst_entry **dst,
- struct flowi *fl);
-extern int ip6_dst_blackhole(struct sock *sk,
- struct dst_entry **dst,
- struct flowi *fl);
-extern int ip6_sk_dst_lookup(struct sock *sk,
- struct dst_entry **dst,
- struct flowi *fl);
+int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ struct ipv6_txoptions *opt, int tclass);
+
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+int ip6_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen, int hlimit,
+ int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags, int dontfrag);
+
+int ip6_push_pending_frames(struct sock *sk);
+
+void ip6_flush_pending_frames(struct sock *sk);
+
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
+struct dst_entry *ip6_blackhole_route(struct net *net,
+ struct dst_entry *orig_dst);
/*
* skb processing functions
*/
-extern int ip6_output(struct sk_buff *skb);
-extern int ip6_forward(struct sk_buff *skb);
-extern int ip6_input(struct sk_buff *skb);
-extern int ip6_mc_input(struct sk_buff *skb);
+int ip6_output(struct sock *sk, struct sk_buff *skb);
+int ip6_forward(struct sk_buff *skb);
+int ip6_input(struct sk_buff *skb);
+int ip6_mc_input(struct sk_buff *skb);
-extern int __ip6_local_out(struct sk_buff *skb);
-extern int ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out(struct sk_buff *skb);
/*
* Extension header (options) processing
*/
-extern void ipv6_push_nfrag_opts(struct sk_buff *skb,
- struct ipv6_txoptions *opt,
- u8 *proto,
- struct in6_addr **daddr_p);
-extern void ipv6_push_frag_opts(struct sk_buff *skb,
- struct ipv6_txoptions *opt,
- u8 *proto);
+void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 *proto, struct in6_addr **daddr_p);
+void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 *proto);
-extern int ipv6_skip_exthdr(const struct sk_buff *, int start,
- u8 *nexthdrp);
+int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
+ __be16 *frag_offp);
-extern int ipv6_ext_hdr(u8 nexthdr);
+bool ipv6_ext_hdr(u8 nexthdr);
-extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+enum {
+ IP6_FH_F_FRAG = (1 << 0),
+ IP6_FH_F_AUTH = (1 << 1),
+ IP6_FH_F_SKIP_RH = (1 << 2),
+};
-extern struct in6_addr *fl6_update_dst(struct flowi *fl,
- const struct ipv6_txoptions *opt,
- struct in6_addr *orig);
+/* find specified header and get offset to it */
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
+ unsigned short *fragoff, int *fragflg);
+
+int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+ const struct ipv6_txoptions *opt,
+ struct in6_addr *orig);
/*
* socket options (ipv6_sockglue.c)
*/
-extern int ipv6_setsockopt(struct sock *sk, int level,
- int optname,
- char __user *optval,
- unsigned int optlen);
-extern int ipv6_getsockopt(struct sock *sk, int level,
- int optname,
- char __user *optval,
- int __user *optlen);
-extern int compat_ipv6_setsockopt(struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- unsigned int optlen);
-extern int compat_ipv6_getsockopt(struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- int __user *optlen);
-
-extern int ip6_datagram_connect(struct sock *sk,
- struct sockaddr *addr, int addr_len);
-
-extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
-extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
- u32 info, u8 *payload);
-extern void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info);
-extern void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu);
-
-extern int inet6_release(struct socket *sock);
-extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len);
-extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer);
-extern int inet6_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg);
-
-extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+int ipv6_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int ipv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
+int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
+ int addr_len);
+
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+
+int inet6_release(struct socket *sock);
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+ int peer);
+int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
/*
@@ -607,30 +833,27 @@ extern const struct proto_ops inet6_dgram_ops;
struct group_source_req;
struct group_filter;
-extern int ip6_mc_source(int add, int omode, struct sock *sk,
- struct group_source_req *pgsr);
-extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
-extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval,
- int __user *optlen);
-extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr, u32 rnd);
+int ip6_mc_source(int add, int omode, struct sock *sk,
+ struct group_source_req *pgsr);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
+int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
+ struct group_filter __user *optval, int __user *optlen);
#ifdef CONFIG_PROC_FS
-extern int ac6_proc_init(struct net *net);
-extern void ac6_proc_exit(struct net *net);
-extern int raw6_proc_init(void);
-extern void raw6_proc_exit(void);
-extern int tcp6_proc_init(struct net *net);
-extern void tcp6_proc_exit(struct net *net);
-extern int udp6_proc_init(struct net *net);
-extern void udp6_proc_exit(struct net *net);
-extern int udplite6_proc_init(void);
-extern void udplite6_proc_exit(void);
-extern int ipv6_misc_proc_init(void);
-extern void ipv6_misc_proc_exit(void);
-extern int snmp6_register_dev(struct inet6_dev *idev);
-extern int snmp6_unregister_dev(struct inet6_dev *idev);
+int ac6_proc_init(struct net *net);
+void ac6_proc_exit(struct net *net);
+int raw6_proc_init(void);
+void raw6_proc_exit(void);
+int tcp6_proc_init(struct net *net);
+void tcp6_proc_exit(struct net *net);
+int udp6_proc_init(struct net *net);
+void udp6_proc_exit(struct net *net);
+int udplite6_proc_init(void);
+void udplite6_proc_exit(void);
+int ipv6_misc_proc_init(void);
+void ipv6_misc_proc_exit(void);
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
#else
static inline int ac6_proc_init(struct net *net) { return 0; }
@@ -640,16 +863,12 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif
#ifdef CONFIG_SYSCTL
-extern ctl_table ipv6_route_table_template[];
-extern ctl_table ipv6_icmp_table_template[];
-
-extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
-extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
-extern int ipv6_sysctl_register(void);
-extern void ipv6_sysctl_unregister(void);
-extern int ipv6_static_sysctl_register(void);
-extern void ipv6_static_sysctl_unregister(void);
+extern struct ctl_table ipv6_route_table_template[];
+
+struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+int ipv6_sysctl_register(void);
+void ipv6_sysctl_unregister(void);
#endif
-#endif /* __KERNEL__ */
#endif /* _NET_IPV6_H */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 05d7e4a88b4..0143180fecc 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -80,7 +80,6 @@ struct ipx_route {
atomic_t refcnt;
};
-#ifdef __KERNEL__
struct ipx_cb {
u8 ipx_tctrl;
__be32 ipx_dest_net;
@@ -116,7 +115,6 @@ static inline struct ipx_sock *ipx_sk(struct sock *sk)
}
#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
-#endif
#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
@@ -125,23 +123,34 @@ extern struct list_head ipx_routes;
extern rwlock_t ipx_routes_lock;
extern struct list_head ipx_interfaces;
-extern struct ipx_interface *ipx_interfaces_head(void);
+struct ipx_interface *ipx_interfaces_head(void);
extern spinlock_t ipx_interfaces_lock;
extern struct ipx_interface *ipx_primary_net;
-extern int ipx_proc_init(void);
-extern void ipx_proc_exit(void);
+int ipx_proc_init(void);
+void ipx_proc_exit(void);
-extern const char *ipx_frame_name(__be16);
-extern const char *ipx_device_name(struct ipx_interface *intrfc);
+const char *ipx_frame_name(__be16);
+const char *ipx_device_name(struct ipx_interface *intrfc);
static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
{
atomic_inc(&intrfc->refcnt);
}
-extern void ipxitf_down(struct ipx_interface *intrfc);
+void ipxitf_down(struct ipx_interface *intrfc);
+struct ipx_interface *ipxitf_find_using_net(__be32 net);
+int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
+__be16 ipx_cksum(struct ipxhdr *packet, int length);
+int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
+ unsigned char *node);
+void ipxrtr_del_routes(struct ipx_interface *intrfc);
+int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
+ struct iovec *iov, size_t len, int noblock);
+int ipxrtr_route_skb(struct sk_buff *skb);
+struct ipx_route *ipxrtr_lookup(__be32 net);
+int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
{
diff --git a/include/net/irda/discovery.h b/include/net/irda/discovery.h
index 0ce93398720..63ae3253056 100644
--- a/include/net/irda/discovery.h
+++ b/include/net/irda/discovery.h
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
index 69b610acd2d..2a580ce9eda 100644
--- a/include/net/irda/ircomm_core.h
+++ b/include/net/irda/ircomm_core.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_event.h b/include/net/irda/ircomm_event.h
index bc0c6f31f1c..5bbc32998d5 100644
--- a/include/net/irda/ircomm_event.h
+++ b/include/net/irda/ircomm_event.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_lmp.h b/include/net/irda/ircomm_lmp.h
index ae02106be59..5042a5021a0 100644
--- a/include/net/irda/ircomm_lmp.h
+++ b/include/net/irda/ircomm_lmp.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_param.h b/include/net/irda/ircomm_param.h
index e6678800c41..1f67432321c 100644
--- a/include/net/irda/ircomm_param.h
+++ b/include/net/irda/ircomm_param.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_ttp.h b/include/net/irda/ircomm_ttp.h
index 403081ed725..c5627288bca 100644
--- a/include/net/irda/ircomm_ttp.h
+++ b/include/net/irda/ircomm_ttp.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
index eea2e615238..8d4f588974b 100644
--- a/include/net/irda/ircomm_tty.h
+++ b/include/net/irda/ircomm_tty.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -52,21 +50,16 @@
/* Same for payload size. See qos.c for the smallest max data size */
#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
-/* Those are really defined in include/linux/serial.h - Jean II */
-#define ASYNC_B_INITIALIZED 31 /* Serial port was initialized */
-#define ASYNC_B_NORMAL_ACTIVE 29 /* Normal device is active */
-#define ASYNC_B_CLOSING 27 /* Serial port is closing */
-
/*
* IrCOMM TTY driver state
*/
struct ircomm_tty_cb {
irda_queue_t queue; /* Must be first */
+ struct tty_port port;
magic_t magic;
int state; /* Connect state */
- struct tty_struct *tty;
struct ircomm_cb *ircomm; /* IrCOMM layer instance */
struct sk_buff *tx_skb; /* Transmit buffer */
@@ -80,7 +73,6 @@ struct ircomm_tty_cb {
LOCAL_FLOW flow; /* IrTTP flow status */
int line;
- unsigned long flags;
__u8 dlsap_sel;
__u8 slsap_sel;
@@ -97,19 +89,10 @@ struct ircomm_tty_cb {
void *skey;
void *ckey;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
struct timer_list watchdog_timer;
struct work_struct tqueue;
- unsigned short close_delay;
- unsigned short closing_wait; /* time to wait before closing */
-
- int open_count;
- int blocked_open; /* # of blocked opens */
-
/* Protect concurent access to :
- * o self->open_count
* o self->ctrl_skb
* o self->tx_skb
* Maybe other things may gain to be protected as well...
@@ -120,13 +103,13 @@ struct ircomm_tty_cb {
void ircomm_tty_start(struct tty_struct *tty);
void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
-extern int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file);
-extern int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear);
-extern int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-extern void ircomm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios);
+int ircomm_tty_tiocmget(struct tty_struct *tty);
+int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear);
+int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+void ircomm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios);
#endif
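The ircomm_tty_cb changes above fold the driver's private open/close wait queues, open_count and blocked_open counters into the embedded struct tty_port, which is the tty layer's common owner of that state. A hedged, kernel-style sketch of the pattern (my_tty_cb and its init helper are illustrative, not part of this patch):

#include <linux/tty.h>

/* Illustrative driver state embedding a tty_port; the port now owns
 * the wait queues, reference count and flags the driver used to keep
 * by hand. */
struct my_tty_cb {
	struct tty_port port;		/* must be initialised first */
	int line;
};

static void my_tty_cb_init(struct my_tty_cb *self, int line)
{
	tty_port_init(&self->port);	/* sets up waitqueues and counters */
	self->line = line;
}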
diff --git a/include/net/irda/ircomm_tty_attach.h b/include/net/irda/ircomm_tty_attach.h
index 0a63bbb972d..20dcbdf258c 100644
--- a/include/net/irda/ircomm_tty_attach.h
+++ b/include/net/irda/ircomm_tty_attach.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index 3bed61d379a..a059465101f 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -112,20 +112,19 @@ do { if(!(expr)) { \
struct net_device;
struct packet_type;
-extern void irda_proc_register(void);
-extern void irda_proc_unregister(void);
+void irda_proc_register(void);
+void irda_proc_unregister(void);
-extern int irda_sysctl_register(void);
-extern void irda_sysctl_unregister(void);
+int irda_sysctl_register(void);
+void irda_sysctl_unregister(void);
-extern int irsock_init(void);
-extern void irsock_cleanup(void);
+int irsock_init(void);
+void irsock_cleanup(void);
-extern int irda_nl_register(void);
-extern void irda_nl_unregister(void);
+int irda_nl_register(void);
+void irda_nl_unregister(void);
-extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev);
#endif /* NET_IRDA_H */
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index 94c852d47d0..664bf817841 100644
--- a/include/net/irda/irda_device.h
+++ b/include/net/irda/irda_device.h
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -162,7 +160,7 @@ typedef struct {
int irq, irq2; /* Interrupts used */
int dma, dma2; /* DMA channel(s) used */
int fifo_size; /* FIFO size */
- int irqflags; /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */
+ int irqflags; /* interrupt flags (ie, IRQF_SHARED) */
int direction; /* Link direction, used by some FIR drivers */
int enabled; /* Powered on? */
int suspended; /* Suspended by APM */
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
index 0af8b8dfbc2..550c2d6ec7f 100644
--- a/include/net/irda/irlan_common.h
+++ b/include/net/irda/irlan_common.h
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <net/irda/irttp.h>
@@ -161,7 +162,7 @@ struct irlan_provider_cb {
int access_type; /* Access type */
__u16 send_arb_val;
- __u8 mac_address[6]; /* Generated MAC address for peer device */
+ __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */
};
/*
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index 17fcd964f9d..fb4b76d5d7f 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -204,7 +204,7 @@ struct irlap_cb {
notify_t notify; /* Callbacks to IrLMP */
- int mtt_required; /* Minumum turnaround time required */
+ int mtt_required; /* Minimum turnaround time required */
int xbofs_delay; /* Nr of XBOF's used to MTT */
int bofs_count; /* Negotiated extra BOFs */
int next_bofs; /* Negotiated extra BOFs after next frame */
diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h
index 4c90824c50f..e4325fee126 100644
--- a/include/net/irda/irlap_event.h
+++ b/include/net/irda/irlap_event.h
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -126,6 +124,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
void irlap_print_event(IRLAP_EVENT event);
-extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
#endif
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
index 6b1dc4f8eca..cbc12a926e5 100644
--- a/include/net/irda/irlap_frame.h
+++ b/include/net/irda/irlap_frame.h
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -163,7 +161,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
__u8 caddr, int command);
-extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
- struct sk_buff *skb);
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+ struct sk_buff *skb);
#endif
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
index fff11b7fe8a..f132924cc9d 100644
--- a/include/net/irda/irlmp.h
+++ b/include/net/irda/irlmp.h
@@ -134,7 +134,7 @@ typedef struct {
} CACHE_ENTRY;
/*
- * Information about each registred IrLAP layer
+ * Information about each registered IrLAP layer
*/
struct lap_cb {
irda_queue_t queue; /* Must be first */
@@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
return (self && self->lap) ? self->lap->daddr : 0;
}
-extern const char *irlmp_reasons[];
+const char *irlmp_reason_str(LM_REASON reason);
+
extern int sysctl_discovery_timeout;
extern int sysctl_discovery_slots;
extern int sysctl_discovery;
@@ -278,7 +279,7 @@ static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
}
/* After doing a irlmp_dup(), this get one of the two socket back into
- * a state where it's waiting incomming connections.
+ * a state where it's waiting incoming connections.
* Note : this can be used *only* if the socket is not yet connected
* (i.e. NO irlmp_connect_response() done on this socket).
* - Jean II */
diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h
index af4b87721d1..98682d4bae8 100644
--- a/include/net/irda/irttp.h
+++ b/include/net/irda/irttp.h
@@ -185,7 +185,7 @@ static inline __u32 irttp_get_max_seg_size(struct tsap_cb *self)
}
/* After doing a irttp_dup(), this get one of the two socket back into
- * a state where it's waiting incomming connections.
+ * a state where it's waiting incoming connections.
* Note : this can be used *only* if the socket is not yet connected
* (i.e. NO irttp_connect_response() done on this socket).
* - Jean II */
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
index c0d938847bd..42713c931d1 100644
--- a/include/net/irda/parameters.h
+++ b/include/net/irda/parameters.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Michel Dänzer <daenzer@debian.org>, 10/2001
* - simplify irda_pv_t to avoid endianness issues
diff --git a/include/net/irda/qos.h b/include/net/irda/qos.h
index cc577dc0a0e..05a5a249956 100644
--- a/include/net/irda/qos.h
+++ b/include/net/irda/qos.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/wrapper.h b/include/net/irda/wrapper.h
index 2942ad6ab93..eef53ebe3d7 100644
--- a/include/net/irda/wrapper.h
+++ b/include/net/irda/wrapper.h
@@ -42,7 +42,7 @@
#define IRDA_TRANS 0x20 /* Asynchronous transparency modifier */
-/* States for receving a frame in async mode */
+/* States for receiving a frame in async mode */
enum {
OUTSIDE_FRAME,
BEGIN_FRAME,
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f82a1e87737..714cc9a54a4 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/socket.h>
+#include <net/iucv/iucv.h>
#ifndef AF_IUCV
#define AF_IUCV 32
@@ -26,13 +27,13 @@ enum {
IUCV_OPEN,
IUCV_BOUND,
IUCV_LISTEN,
- IUCV_SEVERED,
IUCV_DISCONN,
IUCV_CLOSING,
IUCV_CLOSED
};
#define IUCV_QUEUELEN_DEFAULT 65535
+#define IUCV_HIPER_MSGLIM_DEFAULT 128
#define IUCV_CONN_TIMEOUT (HZ * 40)
#define IUCV_DISCONN_TIMEOUT (HZ * 2)
#define IUCV_CONN_IDLE_TIMEOUT (HZ * 60)
@@ -57,8 +58,52 @@ struct sock_msg_q {
spinlock_t lock;
};
+#define AF_IUCV_FLAG_ACK 0x1
+#define AF_IUCV_FLAG_SYN 0x2
+#define AF_IUCV_FLAG_FIN 0x4
+#define AF_IUCV_FLAG_WIN 0x8
+#define AF_IUCV_FLAG_SHT 0x10
+
+struct af_iucv_trans_hdr {
+ u16 magic;
+ u8 version;
+ u8 flags;
+ u16 window;
+ char destNodeID[8];
+ char destUserID[8];
+ char destAppName[16];
+ char srcNodeID[8];
+ char srcUserID[8];
+ char srcAppName[16]; /* => 70 bytes */
+ struct iucv_message iucv_hdr; /* => 33 bytes */
+ u8 pad; /* total 104 bytes */
+} __packed;
+
+enum iucv_tx_notify {
+ /* transmission of skb is completed and was successful */
+ TX_NOTIFY_OK = 0,
+ /* target is unreachable */
+ TX_NOTIFY_UNREACHABLE = 1,
+ /* transfer pending queue full */
+ TX_NOTIFY_TPQFULL = 2,
+ /* general error */
+ TX_NOTIFY_GENERALERROR = 3,
+ /* transmission of skb is pending - may interleave
+ * with TX_NOTIFY_DELAYED_* */
+ TX_NOTIFY_PENDING = 4,
+ /* transmission of skb was done successfully (delayed) */
+ TX_NOTIFY_DELAYED_OK = 5,
+ /* target unreachable (detected delayed) */
+ TX_NOTIFY_DELAYED_UNREACHABLE = 6,
+ /* general error (detected delayed) */
+ TX_NOTIFY_DELAYED_GENERALERROR = 7,
+};
+
#define iucv_sk(__sk) ((struct iucv_sock *) __sk)
+#define AF_IUCV_TRANS_IUCV 0
+#define AF_IUCV_TRANS_HIPER 1
+
struct iucv_sock {
struct sock sk;
char src_user_id[8];
@@ -69,17 +114,34 @@ struct iucv_sock {
spinlock_t accept_q_lock;
struct sock *parent;
struct iucv_path *path;
+ struct net_device *hs_dev;
struct sk_buff_head send_skb_q;
struct sk_buff_head backlog_skb_q;
struct sock_msg_q message_q;
unsigned int send_tag;
u8 flags;
u16 msglimit;
+ u16 msglimit_peer;
+ atomic_t msg_sent;
+ atomic_t msg_recv;
+ atomic_t pendings;
+ int transport;
+ void (*sk_txnotify)(struct sk_buff *skb,
+ enum iucv_tx_notify n);
+};
+
+struct iucv_skb_cb {
+ u32 class; /* target class of message */
+ u32 tag; /* tag associated with message */
+ u32 offset; /* offset for skb receival */
+ u32 offset; /* offset for skb reception */
};
+#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
+
/* iucv socket options (SOL_IUCV) */
#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
+#define SO_MSGSIZE 0x0800 /* get maximum msgsize */
/* iucv related control messages (scm) */
#define SCM_IUCV_TRGCLS 0x0001 /* target class control message */
@@ -94,7 +156,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
void iucv_accept_unlink(struct sock *sk);
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
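The af_iucv_trans_hdr added above is sent on the wire for the new HiperSockets transport, which is why it is annotated __packed and why its comments track exact byte counts (70 + 33 + 1 = 104). A small standalone sketch, using a hypothetical header rather than the real one, of what the attribute buys:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 7-byte wire header, not the real af_iucv_trans_hdr. */
struct demo_hdr {
	uint16_t magic;
	uint8_t  version;
	uint8_t  flags;
	uint16_t window;
	uint8_t  pad;
};

struct demo_hdr_packed {
	uint16_t magic;
	uint8_t  version;
	uint8_t  flags;
	uint16_t window;
	uint8_t  pad;
} __attribute__((packed));

int main(void)
{
	/* Without the attribute the compiler pads the struct out to a
	 * multiple of its alignment, so sizeof() is typically 8 here
	 * rather than the 7 bytes that actually go on the wire. */
	printf("default: %zu bytes\n", sizeof(struct demo_hdr));
	printf("packed:  %zu bytes\n", sizeof(struct demo_hdr_packed));
	return 0;
}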
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 205a3360156..0894ced3195 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -120,7 +120,7 @@ struct iucv_message {
u32 reply_size;
u8 rmmsg[8];
u8 flags;
-};
+} __packed;
/*
* struct iucv_handler
@@ -173,7 +173,7 @@ struct iucv_handler {
/*
* The message_pending function is called after an icuv interrupt
* type 0x06 or type 0x07 has been received. A new message is
- * availabe and can be received with iucv_message_receive.
+ * available and can be received with iucv_message_receive.
*/
void (*message_pending)(struct iucv_path *, struct iucv_message *);
/*
@@ -459,3 +459,37 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size,
void *answer, size_t asize, size_t *residual);
+
+struct iucv_interface {
+ int (*message_receive)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual);
+ int (*__message_receive)(struct iucv_path *path,
+ struct iucv_message *msg, u8 flags, void *buffer, size_t size,
+ size_t *residual);
+ int (*message_reply)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *reply, size_t size);
+ int (*message_reject)(struct iucv_path *path, struct iucv_message *msg);
+ int (*message_send)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+ int (*__message_send)(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+ int (*message_send2way)(struct iucv_path *path,
+ struct iucv_message *msg, u8 flags, u32 srccls, void *buffer,
+ size_t size, void *answer, size_t asize, size_t *residual);
+ int (*message_purge)(struct iucv_path *path, struct iucv_message *msg,
+ u32 srccls);
+ int (*path_accept)(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userdata[16], void *private);
+ int (*path_connect)(struct iucv_path *path,
+ struct iucv_handler *handler,
+ u8 userid[8], u8 system[8], u8 userdata[16], void *private);
+ int (*path_quiesce)(struct iucv_path *path, u8 userdata[16]);
+ int (*path_resume)(struct iucv_path *path, u8 userdata[16]);
+ int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
+ int (*iucv_register)(struct iucv_handler *handler, int smp);
+ void (*iucv_unregister)(struct iucv_handler *handler, int smp);
+ struct bus_type *bus;
+ struct device *root;
+};
+
+extern struct iucv_interface iucv_if;
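The new struct iucv_interface mirrors the existing iucv_message_*/iucv_path_* API as a table of function pointers, with iucv_if as the exported instance, so callers such as af_iucv can reach the base IUCV code through the table instead of referencing the symbols directly. A hedged sketch of a caller (demo_send is illustrative; error handling and path setup are omitted):

/* Illustrative only: send a buffer through the indirection table
 * rather than calling iucv_message_send() directly. */
static int demo_send(struct iucv_path *path, void *buf, size_t len)
{
	struct iucv_message msg = { .class = 0 };

	return iucv_if.message_send(path, &msg, 0, 0, buf, len);
}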
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 3afdb21cc31..a830b01baba 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -91,7 +91,7 @@
* --------------------
* The implementation goals were as follow :
* o Obvious : you should not need a PhD to understand what's happening,
- * the benefit is easier maintainance.
+ * the benefit is easier maintenance.
* o Flexible : it should accommodate a wide variety of driver
* implementations and be as flexible as the old API.
* o Lean : it should be efficient memory wise to minimise the impact
@@ -129,7 +129,7 @@
*
* Functions prototype uses union iwreq_data
* -----------------------------------------
- * Some would have prefered functions defined this way :
+ * Some would have preferred functions defined this way :
* static int mydriver_ioctl_setrate(struct net_device *dev,
* long rate, int auto)
* 1) The kernel code doesn't "validate" the content of iwreq_data, and
@@ -432,44 +432,32 @@ struct iw_public_data {
/* First : function strictly used inside the kernel */
/* Handle /proc/net/wireless, called in net/code/dev.c */
-extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
- int length);
+int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
/* Second : functions that may be called by driver modules */
/* Send a single event to user space */
-extern void wireless_send_event(struct net_device * dev,
- unsigned int cmd,
- union iwreq_data * wrqu,
- const char * extra);
+void wireless_send_event(struct net_device *dev, unsigned int cmd,
+ union iwreq_data *wrqu, const char *extra);
/* We may need a function to send a stream of events to user space.
* More on that later... */
/* Standard handler for SIOCSIWSPY */
-extern int iw_handler_set_spy(struct net_device * dev,
- struct iw_request_info * info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Standard handler for SIOCGIWSPY */
-extern int iw_handler_get_spy(struct net_device * dev,
- struct iw_request_info * info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Standard handler for SIOCSIWTHRSPY */
-extern int iw_handler_set_thrspy(struct net_device * dev,
- struct iw_request_info *info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Standard handler for SIOCGIWTHRSPY */
-extern int iw_handler_get_thrspy(struct net_device * dev,
- struct iw_request_info *info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Driver call to update spy records */
-extern void wireless_spy_update(struct net_device * dev,
- unsigned char * address,
- struct iw_quality * wstats);
+void wireless_spy_update(struct net_device *dev, unsigned char *address,
+ struct iw_quality *wstats);
/************************* INLINE FUNTIONS *************************/
/*
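All of the handlers above share the (dev, info, wrqu, extra) prototype built around union iwreq_data that the header's design notes discuss. A hypothetical driver-side handler following the same shape (mydrv_set_essid is not a real function):

#include <linux/netdevice.h>
#include <net/iw_handler.h>

/* Hypothetical SIOCSIWESSID handler: wrqu->essid describes the user
 * buffer, extra holds the SSID bytes copied in by the core. */
static int mydrv_set_essid(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
		return -EINVAL;

	/* a real driver would program the hardware from extra here */
	return 0;
}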
diff --git a/include/net/lapb.h b/include/net/lapb.h
index 96cb5ddaa9f..9510f8725f0 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -95,7 +95,7 @@ struct lapb_cb {
struct sk_buff_head write_queue;
struct sk_buff_head ack_queue;
unsigned char window;
- struct lapb_register_struct callbacks;
+ const struct lapb_register_struct *callbacks;
/* FRMR control information */
struct lapb_frame frmr_data;
@@ -105,40 +105,40 @@ struct lapb_cb {
};
/* lapb_iface.c */
-extern void lapb_connect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_connect_indication(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_indication(struct lapb_cb *lapb, int);
-extern int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
-extern int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+void lapb_connect_indication(struct lapb_cb *lapb, int);
+void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
/* lapb_in.c */
-extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
/* lapb_out.c */
-extern void lapb_kick(struct lapb_cb *lapb);
-extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
-extern void lapb_establish_data_link(struct lapb_cb *lapb);
-extern void lapb_enquiry_response(struct lapb_cb *lapb);
-extern void lapb_timeout_response(struct lapb_cb *lapb);
-extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+void lapb_kick(struct lapb_cb *lapb);
+void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+void lapb_establish_data_link(struct lapb_cb *lapb);
+void lapb_enquiry_response(struct lapb_cb *lapb);
+void lapb_timeout_response(struct lapb_cb *lapb);
+void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_check_need_response(struct lapb_cb *lapb, int, int);
/* lapb_subr.c */
-extern void lapb_clear_queues(struct lapb_cb *lapb);
-extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_requeue_frames(struct lapb_cb *lapb);
-extern int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
-extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
-extern void lapb_send_control(struct lapb_cb *lapb, int, int, int);
-extern void lapb_transmit_frmr(struct lapb_cb *lapb);
+void lapb_clear_queues(struct lapb_cb *lapb);
+void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_requeue_frames(struct lapb_cb *lapb);
+int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+void lapb_transmit_frmr(struct lapb_cb *lapb);
/* lapb_timer.c */
-extern void lapb_start_t1timer(struct lapb_cb *lapb);
-extern void lapb_start_t2timer(struct lapb_cb *lapb);
-extern void lapb_stop_t1timer(struct lapb_cb *lapb);
-extern void lapb_stop_t2timer(struct lapb_cb *lapb);
-extern int lapb_t1timer_running(struct lapb_cb *lapb);
+void lapb_start_t1timer(struct lapb_cb *lapb);
+void lapb_start_t2timer(struct lapb_cb *lapb);
+void lapb_stop_t1timer(struct lapb_cb *lapb);
+void lapb_stop_t2timer(struct lapb_cb *lapb);
+int lapb_t1timer_running(struct lapb_cb *lapb);
/*
* Debug levels.
@@ -149,4 +149,10 @@ extern int lapb_t1timer_running(struct lapb_cb *lapb);
*/
#define LAPB_DEBUG 0
+#define lapb_dbg(level, fmt, ...) \
+do { \
+ if (level < LAPB_DEBUG) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
#endif
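The new lapb_dbg() wrapper gates pr_debug() on the compile-time LAPB_DEBUG level, which stays at 0, so call sites compile away unless the level is raised. A hypothetical call site (the message and fields are illustrative):

/* With LAPB_DEBUG left at 0 the level test is constant-false and the
 * pr_debug() disappears entirely. */
lapb_dbg(1, "(%p) t1 timer expired in state %d\n", lapb->dev, lapb->state);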
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
index 848cce1bb7a..be95b926280 100644
--- a/include/net/lib80211.h
+++ b/include/net/lib80211.h
@@ -25,12 +25,13 @@
#include <linux/types.h>
#include <linux/list.h>
-#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <linux/timer.h>
+#include <linux/seq_file.h>
+
/* print_ssid() is intended to be used in debug (and possibly error)
* messages. It should never be used for passing ssid to user space. */
const char *print_ssid(char *buf, const char *ssid, u8 ssid_len);
@@ -42,6 +43,8 @@ enum {
IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
};
+struct module;
+
struct lib80211_crypto_ops {
const char *name;
struct list_head list;
@@ -74,7 +77,7 @@ struct lib80211_crypto_ops {
/* procfs handler for printing out key information and possible
* statistics */
- char *(*print_stats) (char *p, void *priv);
+ void (*print_stats) (struct seq_file *m, void *priv);
/* Crypto specific flag get/set for configuration settings */
unsigned long (*get_flags) (void *priv);
@@ -117,10 +120,7 @@ void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops);
int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
-void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *, int);
-void lib80211_crypt_deinit_handler(unsigned long);
void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
struct lib80211_crypt_data **crypt);
-void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
#endif /* LIB80211_H */
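The print_stats hook above now writes into a seq_file instead of returning a formatted char buffer, so implementations switch from sprintf-style pointer arithmetic to seq_printf(). A hedged sketch of an implementation (my_wep_data and its field names are illustrative):

#include <linux/seq_file.h>
#include <net/lib80211.h>

/* Illustrative private state for a crypto ops implementation. */
struct my_wep_data {
	int key_idx;
	int key_len;
};

static void my_wep_print_stats(struct seq_file *m, void *priv)
{
	struct my_wep_data *wep = priv;

	seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
}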
diff --git a/include/net/llc.h b/include/net/llc.h
index 5503b74ab17..e8e61d4fb45 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -20,7 +20,7 @@
#include <linux/hash.h>
#include <linux/jhash.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct net_device;
struct packet_type;
@@ -93,31 +93,30 @@ struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
#define LLC_DEST_CONN 2 /* Type 2 goes here */
extern struct list_head llc_sap_list;
-extern spinlock_t llc_sap_list_lock;
-extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
+int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev);
-extern int llc_mac_hdr_init(struct sk_buff *skb,
- const unsigned char *sa, const unsigned char *da);
+int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
+ const unsigned char *da);
-extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
- struct sk_buff *skb));
-extern void llc_remove_pack(int type);
+void llc_add_pack(int type,
+ void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
+void llc_remove_pack(int type);
-extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
-extern struct llc_sap *llc_sap_open(unsigned char lsap,
- int (*rcv)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev));
+struct llc_sap *llc_sap_open(unsigned char lsap,
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev));
static inline void llc_sap_hold(struct llc_sap *sap)
{
atomic_inc(&sap->refcnt);
}
-extern void llc_sap_close(struct llc_sap *sap);
+void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
{
@@ -125,33 +124,32 @@ static inline void llc_sap_put(struct llc_sap *sap)
llc_sap_close(sap);
}
-extern struct llc_sap *llc_sap_find(unsigned char sap_value);
+struct llc_sap *llc_sap_find(unsigned char sap_value);
-extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
-extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_station_init(void);
-extern void llc_station_exit(void);
+void llc_station_init(void);
+void llc_station_exit(void);
#ifdef CONFIG_PROC_FS
-extern int llc_proc_init(void);
-extern void llc_proc_exit(void);
+int llc_proc_init(void);
+void llc_proc_exit(void);
#else
#define llc_proc_init() (0)
#define llc_proc_exit() do { } while(0)
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL
-extern int llc_sysctl_init(void);
-extern void llc_sysctl_exit(void);
+int llc_sysctl_init(void);
+void llc_sysctl_exit(void);
extern int sysctl_llc2_ack_timeout;
extern int sysctl_llc2_busy_timeout;
extern int sysctl_llc2_p_timeout;
extern int sysctl_llc2_rej_timeout;
-extern int sysctl_llc_station_ack_timeout;
#else
#define llc_sysctl_init() (0)
#define llc_sysctl_exit() do { } while(0)
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index df83f69d2de..f3be818e73c 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -89,114 +89,92 @@
typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb);
-extern u8 llc_circular_between(u8 a, u8 b, u8 c);
-extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
+u8 llc_circular_between(u8 a, u8 b, u8 c);
+int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_busy_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_ack_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+void llc_conn_busy_tmr_cb(unsigned long timeout_data);
+void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
+void llc_conn_ack_tmr_cb(unsigned long timeout_data);
+void llc_conn_rej_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_set_p_flag(struct sock *sk, u8 value);
+void llc_conn_set_p_flag(struct sock *sk, u8 value);
#endif /* LLC_C_AC_H */
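llc_conn_action_t at the top of this header is the function-pointer type from which the LLC connection state machine builds per-transition action lists; the declarations above are the available actions. A hypothetical action list, assuming the in-kernel convention of NULL-terminated arrays (the real tables live in net/llc/llc_c_st.c):

/* Hypothetical transition actions, executed in order. */
static const llc_conn_action_t demo_actions[] = {
	llc_conn_ac_upd_nr_received,
	llc_conn_ac_send_rr_rsp_f_set_1,
	NULL,				/* sentinel ends the list */
};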
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 23a409381fa..3948cf111dd 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -128,142 +128,97 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_detected(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
/* NOT_USED functions and their variations */
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
/* Available connection action qualifiers */
-extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
- struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
{
return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
- (unsigned)sk->sk_rcvbuf;
+ (unsigned int)sk->sk_rcvbuf;
}
#endif /* LLC_C_EV_H */
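The prototypes above (now without the redundant `extern`) are the event detectors and qualifiers the LLC connection component evaluates for each incoming skb: a state transition fires only when its event function and every attached qualifier return 0. A minimal userspace sketch of that table-driven pattern, with invented names and a small struct standing in for struct sock:

#include <stddef.h>
#include <stdio.h>

struct conn { int p_flag; int retry_cnt; int n2; };	/* stand-in for struct sock */

typedef int (*llc_ev_t)(struct conn *c);	/* 0 = event recognised  */
typedef int (*llc_qual_t)(struct conn *c);	/* 0 = qualifier holds   */

static int ev_rx_rr_rsp_fbit_set_1(struct conn *c) { (void)c; return 0; }
static int qlfy_p_flag_eq_1(struct conn *c)        { return c->p_flag == 1 ? 0 : 1; }
static int qlfy_retry_cnt_lt_n2(struct conn *c)    { return c->retry_cnt < c->n2 ? 0 : 1; }

struct transition {
	llc_ev_t ev;
	const llc_qual_t *quals;	/* NULL-terminated qualifier list */
	const char *next_state;
};

static const llc_qual_t quals_ack[] = { qlfy_p_flag_eq_1, qlfy_retry_cnt_lt_n2, NULL };

static const struct transition table[] = {
	{ ev_rx_rr_rsp_fbit_set_1, quals_ack, "NORMAL" },
};

static const char *process(struct conn *c)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const llc_qual_t *q = table[i].quals;

		if (table[i].ev(c))
			continue;		/* event did not match */
		while (*q && !(*q)(c))
			q++;			/* stop at first failing qualifier */
		if (!*q)
			return table[i].next_state;
	}
	return "no transition";
}

int main(void)
{
	struct conn c = { .p_flag = 1, .retry_cnt = 2, .n2 = 8 };
	printf("next state: %s\n", process(&c));
	return 0;
}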
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 2f97d8ddce9..0134681acc4 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -95,28 +95,24 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
return skb->cb[sizeof(skb->cb) - 1];
}
-extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
-extern void llc_sk_free(struct sock *sk);
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot);
+void llc_sk_free(struct sock *sk);
-extern void llc_sk_reset(struct sock *sk);
+void llc_sk_reset(struct sock *sk);
/* Access to a connection */
-extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr,
- u8 first_p_bit);
-extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr,
- u8 first_f_bit);
-extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr,
- u16 *how_many_unacked);
-extern struct sock *llc_lookup_established(struct llc_sap *sap,
- struct llc_addr *daddr,
- struct llc_addr *laddr);
-extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
-extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
+struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
+ struct llc_addr *laddr);
+void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
-extern u8 llc_data_accept_state(u8 state);
-extern void llc_build_offset_table(void);
+u8 llc_data_accept_state(u8 state);
+void llc_build_offset_table(void);
#endif /* LLC_CONN_H */
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index b595a004d31..8d5c543cd62 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,38 +62,7 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-/**
- * llc_mac_null - determines if a address is a null mac address
- * @mac: Mac address to test if null.
- *
- * Determines if a given address is a null mac address. Returns 0 if the
- * address is not a null mac, 1 if the address is a null mac.
- */
-static inline int llc_mac_null(const u8 *mac)
-{
- return is_zero_ether_addr(mac);
-}
-
-static inline int llc_mac_multicast(const u8 *mac)
-{
- return is_multicast_ether_addr(mac);
-}
-/**
- * llc_mac_match - determines if two mac addresses are the same
- * @mac1: First mac address to compare.
- * @mac2: Second mac address to compare.
- *
- * Determines if two given mac address are the same. Returns 0 if there
- * is not a complete match up to len, 1 if a complete match up to len is
- * found.
- */
-static inline int llc_mac_match(const u8 *mac1, const u8 *mac2)
-{
- return !compare_ether_addr(mac1, mac2);
-}
-
-extern int llc_establish_connection(struct sock *sk, u8 *lmac,
- u8 *dmac, u8 dsap);
-extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
-extern int llc_send_disc(struct sock *sk);
+int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 75b8e2968c9..c0f0a13ed81 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -13,7 +13,6 @@
*/
#include <linux/if_ether.h>
-#include <linux/if_tr.h>
/* Lengths of frame formats */
#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
@@ -143,7 +142,7 @@
#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
-#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
+#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
/* FRMR information field macros */
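The PDU_GET_NEXT_Vr() rewrite above is not cosmetic: the old body evaluated ++sn, so the macro incremented its argument as a side effect and only accepted lvalues. A small standalone sketch of the behavioural difference, assuming only the 128-value sequence modulo this header defines:

#include <stdio.h>

#define LLC_2_SEQ_NBR_MODULO	128	/* sequence numbers run 0..127 */

#define PDU_GET_NEXT_Vr_OLD(sn)	(++sn & ~LLC_2_SEQ_NBR_MODULO)		/* modifies sn! */
#define PDU_GET_NEXT_Vr_NEW(sn)	(((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)

int main(void)
{
	unsigned char vr = 127;
	unsigned char next;

	next = PDU_GET_NEXT_Vr_NEW(vr);
	printf("new: next=%d, vr still %d\n", next, vr);	/* next wraps to 0, vr untouched */

	next = PDU_GET_NEXT_Vr_OLD(vr);
	printf("old: next=%d, vr now %d\n", next, vr);		/* vr was bumped to 128 as a side effect */
	return 0;
}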
@@ -199,7 +198,7 @@ struct llc_pdu_sn {
u8 ssap;
u8 ctrl_1;
u8 ctrl_2;
-};
+} __packed;
static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
{
@@ -211,7 +210,7 @@ struct llc_pdu_un {
u8 dsap;
u8 ssap;
u8 ctrl_1;
-};
+} __packed;
static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
{
@@ -253,10 +252,6 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
if (skb->protocol == htons(ETH_P_802_2))
memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
- else if (skb->protocol == htons(ETH_P_TR_802_2)) {
- memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN);
- *sa &= 0x7F;
- }
}
/**
@@ -270,8 +265,6 @@ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
{
if (skb->protocol == htons(ETH_P_802_2))
memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
- else if (skb->protocol == htons(ETH_P_TR_802_2))
- memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN);
}
/**
@@ -359,7 +352,7 @@ struct llc_xid_info {
u8 fmt_id; /* always 0x81 for LLC */
u8 type; /* different if NULL/non-NULL LSAP */
u8 rw; /* sender receive window */
-};
+} __packed;
/**
* llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
@@ -415,23 +408,22 @@ struct llc_frmr_info {
u8 curr_ssv; /* current send state variable val */
u8 curr_rsv; /* current receive state variable */
u8 ind_bits; /* indicator bits set with macro */
-};
-
-extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
-extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
-extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
-extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
-extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
-extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb,
- struct llc_pdu_sn *prev_pdu,
- u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
-extern void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+} __packed;
+
+void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
+ u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
#endif /* LLC_PDU_H */
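The __packed annotations added to llc_pdu_sn, llc_pdu_un, llc_xid_info and llc_frmr_info record that these structs are overlaid directly onto frame bytes (see llc_pdu_sn_hdr() above), so no compiler padding may creep in. For the all-u8 headers the attribute is belt and braces, but the wire contract can be checked at compile time; a sketch using the plain GCC attribute rather than the kernel's __packed macro:

#include <assert.h>
#include <stdint.h>

/* wire layout of an LLC PDU with two control bytes (I/S formats) */
struct llc_pdu_sn {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t ctrl_1;
	uint8_t ctrl_2;
} __attribute__((packed));

/* wire layout of an LLC PDU with one control byte (U format) */
struct llc_pdu_un {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t ctrl_1;
} __attribute__((packed));

/* no compiler-inserted padding may change the header sizes */
static_assert(sizeof(struct llc_pdu_sn) == 4, "I/S header must be 4 bytes");
static_assert(sizeof(struct llc_pdu_un) == 3, "U header must be 3 bytes");

int main(void) { return 0; }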
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
index 37a3bbd0239..a61b98c108e 100644
--- a/include/net/llc_s_ac.h
+++ b/include/net/llc_s_ac.h
@@ -25,15 +25,13 @@
/* All action functions must look like this */
typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
- struct sk_buff *skb);
-extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_report_status(struct llc_sap *sap,
- struct sk_buff *skb);
-extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
#endif /* LLC_S_AC_H */
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
index e3acb9329e4..84db3a59ed2 100644
--- a/include/net/llc_s_ev.h
+++ b/include/net/llc_s_ev.h
@@ -53,15 +53,14 @@ struct llc_sap;
typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_deactivation_req(struct llc_sap *sap,
- struct sk_buff *skb);
+int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
#endif /* LLC_S_EV_H */
diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h
index ed25bec2f64..1e4df9fd9fb 100644
--- a/include/net/llc_sap.h
+++ b/include/net/llc_sap.h
@@ -19,18 +19,14 @@ struct net_device;
struct sk_buff;
struct sock;
-extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
- unsigned char prim);
-extern struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
- u8 type, u32 data_size);
+void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
+ unsigned char prim);
+struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
+ u8 type, u32 data_size);
-extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
- struct sk_buff *skb,
- unsigned char *dmac,
- unsigned char dsap);
-extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
- struct sk_buff *skb,
- unsigned char *dmac,
- unsigned char dsap);
+void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
+void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
#endif /* LLC_SAP_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index eaa4affd40c..421b6ecb4b2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -13,13 +13,13 @@
#ifndef MAC80211_H
#define MAC80211_H
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
-#include <linux/wireless.h>
-#include <linux/device.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
+#include <asm/unaligned.h>
/**
* DOC: Introduction
@@ -66,10 +66,6 @@
*
* Secondly, when the hardware handles fragmentation, the frame handed to
* the driver from mac80211 is the MSDU, not the MPDU.
- *
- * Finally, for received frames, the driver is able to indicate that it has
- * filled a radiotap header and put that in front of the frame; if it does
- * not do so then mac80211 may add this under certain circumstances.
*/
/**
@@ -87,15 +83,21 @@
*
*/
+struct device;
+
/**
* enum ieee80211_max_queues - maximum number of queues
*
* @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
+ * @IEEE80211_MAX_QUEUE_MAP: bitmap with maximum queues set
*/
enum ieee80211_max_queues {
- IEEE80211_MAX_QUEUES = 4,
+ IEEE80211_MAX_QUEUES = 16,
+ IEEE80211_MAX_QUEUE_MAP = BIT(IEEE80211_MAX_QUEUES) - 1,
};
+#define IEEE80211_INVAL_HW_QUEUE 0xff
+
/**
* enum ieee80211_ac_numbers - AC numbers as used in mac80211
* @IEEE80211_AC_VO: voice
@@ -109,6 +111,7 @@ enum ieee80211_ac_numbers {
IEEE80211_AC_BE = 2,
IEEE80211_AC_BK = 3,
};
+#define IEEE80211_NUM_ACS 4
/**
* struct ieee80211_tx_queue_params - transmit queue configuration
@@ -121,6 +124,7 @@ enum ieee80211_ac_numbers {
* 2^n-1 in the range 1..32767]
* @cw_max: maximum contention window [like @cw_min]
* @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
+ * @acm: is mandatory admission control required for the access category
* @uapsd: is U-APSD mode enabled for the queue
*/
struct ieee80211_tx_queue_params {
@@ -128,6 +132,7 @@ struct ieee80211_tx_queue_params {
u16 cw_min;
u16 cw_max;
u8 aifs;
+ bool acm;
bool uapsd;
};
@@ -139,6 +144,88 @@ struct ieee80211_low_level_stats {
};
/**
+ * enum ieee80211_chanctx_change - change flag for channel context
+ * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
+ * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
+ * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
+ * this is used only with channel switching with CSA
+ * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ */
+enum ieee80211_chanctx_change {
+ IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
+ IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
+ IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
+ IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
+ IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+};
+
+/**
+ * struct ieee80211_chanctx_conf - channel context that vifs may be tuned to
+ *
+ * This is the driver-visible part. The ieee80211_chanctx
+ * that contains it is visible in mac80211 only.
+ *
+ * @def: the channel definition
+ * @min_def: the minimum channel definition currently required.
+ * @rx_chains_static: The number of RX chains that must always be
+ * active on the channel to receive MIMO transmissions
+ * @rx_chains_dynamic: The number of RX chains that must be enabled
+ * after RTS/CTS handshake to receive SMPS MIMO transmissions;
+ * this will always be >= @rx_chains_static.
+ * @radar_enabled: whether radar detection is enabled on this channel.
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void *), size is determined in hw information.
+ */
+struct ieee80211_chanctx_conf {
+ struct cfg80211_chan_def def;
+ struct cfg80211_chan_def min_def;
+
+ u8 rx_chains_static, rx_chains_dynamic;
+
+ bool radar_enabled;
+
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
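The drv_priv[] tail at the end of struct ieee80211_chanctx_conf, aligned to pointer size, is how mac80211 hands each driver a private scratch area sized at registration time. A userspace sketch of the same allocation pattern; the driver struct is made up, and in mac80211 the tail size comes from the hw registration data rather than a hard-coded sizeof:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* cut-down stand-in for the mac80211 struct: public fields plus a driver tail */
struct chanctx_conf {
	int radar_enabled;
	uint8_t drv_priv[] __attribute__((aligned(sizeof(void *))));
};

/* hypothetical driver-private per-context state */
struct my_drv_chanctx {
	int fw_ctx_id;
};

int main(void)
{
	struct chanctx_conf *conf;
	struct my_drv_chanctx *priv;

	conf = calloc(1, sizeof(*conf) + sizeof(*priv));
	if (!conf)
		return 1;

	priv = (struct my_drv_chanctx *)conf->drv_priv;	/* the driver only ever touches its tail */
	priv->fw_ctx_id = 7;

	printf("firmware context id: %d\n", priv->fw_ctx_id);
	free(conf);
	return 0;
}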
+/**
+ * enum ieee80211_chanctx_switch_mode - channel context switch mode
+ * @CHANCTX_SWMODE_REASSIGN_VIF: Both old and new contexts already
+ * exist (and will continue to exist), but the virtual interface
+ * needs to be switched from one to the other.
+ * @CHANCTX_SWMODE_SWAP_CONTEXTS: The old context exists but will cease
+ * to exist with this call, the new context doesn't exist but
+ * will be active after this call, the virtual interface switches
+ * from the old to the new (note that the driver may of course
+ * implement this as an on-the-fly chandef switch of the existing
+ * hardware context, but the mac80211 pointer for the old context
+ * will cease to exist and only the new one will later be used
+ * for changes/removal.)
+ */
+enum ieee80211_chanctx_switch_mode {
+ CHANCTX_SWMODE_REASSIGN_VIF,
+ CHANCTX_SWMODE_SWAP_CONTEXTS,
+};
+
+/**
+ * struct ieee80211_vif_chanctx_switch - vif chanctx switch information
+ *
+ * This structure is used to pass information about a vif that
+ * needs to switch from one chanctx to another. The
+ * &ieee80211_chanctx_switch_mode defines how the switch should be
+ * done.
+ *
+ * @vif: the vif that should be switched from old_ctx to new_ctx
+ * @old_ctx: the old context to which the vif was assigned
+ * @new_ctx: the new context to which the vif must be assigned
+ */
+struct ieee80211_vif_chanctx_switch {
+ struct ieee80211_vif *vif;
+ struct ieee80211_chanctx_conf *old_ctx;
+ struct ieee80211_chanctx_conf *new_ctx;
+};
+
+/**
* enum ieee80211_bss_change - BSS change notification flags
*
* These flags are used with the bss_info_changed() callback
@@ -164,13 +251,24 @@ struct ieee80211_low_level_stats {
* @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note
* that it is only ever disabled for station mode.
* @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
+ * @BSS_CHANGED_SSID: SSID changed for this BSS (AP and IBSS mode)
+ * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
+ * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
+ * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
+ * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
+ * changed (currently only in P2P client mode, GO mode will be later)
+ * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available:
+ * currently only dtim_period is under consideration.
+ * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
+ * note that this is only called when it changes after the channel
+ * context had been assigned.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
BSS_CHANGED_ERP_CTS_PROT = 1<<1,
BSS_CHANGED_ERP_PREAMBLE = 1<<2,
BSS_CHANGED_ERP_SLOT = 1<<3,
- BSS_CHANGED_HT = 1<<4,
+ BSS_CHANGED_HT = 1<<4,
BSS_CHANGED_BASIC_RATES = 1<<5,
BSS_CHANGED_BEACON_INT = 1<<6,
BSS_CHANGED_BSSID = 1<<7,
@@ -181,6 +279,13 @@ enum ieee80211_bss_change {
BSS_CHANGED_ARP_FILTER = 1<<12,
BSS_CHANGED_QOS = 1<<13,
BSS_CHANGED_IDLE = 1<<14,
+ BSS_CHANGED_SSID = 1<<15,
+ BSS_CHANGED_AP_PROBE_RESP = 1<<16,
+ BSS_CHANGED_PS = 1<<17,
+ BSS_CHANGED_TXPOWER = 1<<18,
+ BSS_CHANGED_P2P_PS = 1<<19,
+ BSS_CHANGED_BEACON_INFO = 1<<20,
+ BSS_CHANGED_BANDWIDTH = 1<<21,
/* when adding here, make sure to change ieee80211_reconfig */
};
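Each BSS_CHANGED_* value is a single bit, and bss_info_changed() receives the OR of everything that changed since the last call, so a driver tests bits rather than comparing values. A trimmed userspace sketch of that dispatch; the flag values are copied from the enum above, the driver actions are invented:

#include <stdint.h>
#include <stdio.h>

/* two of the bits above, as plain constants for a userspace sketch */
#define BSS_CHANGED_SSID	(1u << 15)
#define BSS_CHANGED_TXPOWER	(1u << 18)

/* hypothetical driver hook: mac80211 passes the OR of all changed bits */
static void my_bss_info_changed(uint32_t changed)
{
	if (changed & BSS_CHANGED_SSID)
		printf("push new SSID to firmware\n");
	if (changed & BSS_CHANGED_TXPOWER)
		printf("apply new per-vif TX power\n");
}

int main(void)
{
	my_bss_info_changed(BSS_CHANGED_SSID | BSS_CHANGED_TXPOWER);
	return 0;
}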
@@ -193,6 +298,17 @@ enum ieee80211_bss_change {
#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
/**
+ * enum ieee80211_rssi_event - RSSI threshold event
+ * An indicator for when RSSI goes below/above a certain threshold.
+ * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver.
+ * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver.
+ */
+enum ieee80211_rssi_event {
+ RSSI_EVENT_HIGH,
+ RSSI_EVENT_LOW,
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -201,6 +317,7 @@ enum ieee80211_bss_change {
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
+ * @ibss_creator: indicates if a new IBSS network is being created
* @aid: association ID number, valid only when @assoc is true
* @use_cts_prot: use CTS protection
* @use_short_preamble: use 802.11b short preamble;
@@ -210,22 +327,31 @@ enum ieee80211_bss_change {
* if the hardware cannot handle this it must set the
* IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
* @dtim_period: num of beacons before the next DTIM, for beaconing,
- * valid in station mode only while @assoc is true and if also
- * requested by %IEEE80211_HW_NEED_DTIM_PERIOD (cf. also hw conf
- * @ps_dtim_period)
- * @timestamp: beacon timestamp
+ * valid in station mode only after the driver was notified
+ * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
+ * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old
+ * as it may have been received during scanning long ago). If the
+ * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
+ * only come from a beacon, but might not become valid until after
+ * association when a beacon is received (which is notified with the
+ * %BSS_CHANGED_DTIM flag.)
+ * @sync_device_ts: the device timestamp corresponding to the sync_tsf,
+ * the driver/device can use this to calculate synchronisation
+ * (see @sync_tsf)
+ * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
+ * is requested, see @sync_tsf/@sync_device_ts.
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
* index into the rate table configured by the driver in
* the current band.
+ * @beacon_rate: associated AP's beacon TX rate
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
* @bssid: The BSSID for this BSS
* @enable_beacon: whether beaconing should be enabled or not
- * @channel_type: Channel type for this BSS -- the hardware might be
- * configured for HT40+ while this BSS only uses no-HT, for
- * example.
- * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info).
+ * @chandef: Channel definition for this BSS -- the hardware might be
+ * configured for a higher bandwidth than this BSS uses, for example.
+ * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
* This field is only valid when the channel type is one of the HT types.
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
@@ -234,20 +360,26 @@ enum ieee80211_bss_change {
* may filter ARP queries targeted for other addresses than listed here.
* The driver must allow ARP queries targeted for all address listed here
* to pass through. An empty list implies no ARP queries need to pass.
- * @arp_addr_cnt: Number of addresses currently on the list.
- * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may
- * filter ARP queries based on the @arp_addr_list, if disabled, the
- * hardware must not perform any ARP filtering. Note, that the filter will
- * be enabled also in promiscuous mode.
+ * @arp_addr_cnt: Number of addresses currently on the list. Note that this
+ * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list
+ * array size), it's up to the driver what to do in that case.
* @qos: This is a QoS-enabled BSS.
* @idle: This interface is idle. There's also a global idle flag in the
* hardware config which may be more appropriate depending on what
* your driver/device needs to do.
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ * offchannel/dynamic_ps operations.
+ * @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
+ * @ssid_len: Length of SSID given in @ssid.
+ * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
+ * @txpower: TX power in dBm
+ * @p2p_noa_attr: P2P NoA attribute for P2P powersave
*/
struct ieee80211_bss_conf {
const u8 *bssid;
/* association related data */
bool assoc, ibss_joined;
+ bool ibss_creator;
u16 aid;
/* erp related data */
bool use_cts_prot;
@@ -257,22 +389,30 @@ struct ieee80211_bss_conf {
u8 dtim_period;
u16 beacon_int;
u16 assoc_capability;
- u64 timestamp;
+ u64 sync_tsf;
+ u32 sync_device_ts;
+ u8 sync_dtim_count;
u32 basic_rates;
+ struct ieee80211_rate *beacon_rate;
int mcast_rate[IEEE80211_NUM_BANDS];
u16 ht_operation_mode;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
- enum nl80211_channel_type channel_type;
+ struct cfg80211_chan_def chandef;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
- u8 arp_addr_cnt;
- bool arp_filter_enabled;
+ int arp_addr_cnt;
bool qos;
bool idle;
+ bool ps;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ bool hidden_ssid;
+ int txpower;
+ struct ieee80211_p2p_noa_attr p2p_noa_attr;
};
/**
- * enum mac80211_tx_control_flags - flags to describe transmission information/status
+ * enum mac80211_tx_info_flags - flags to describe transmission information/status
*
* These flags are used with the @flags member of &ieee80211_tx_info.
*
@@ -312,6 +452,9 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
* set by rate control algorithms to indicate probe rate, will
* be cleared for fragmented frames (except on the last fragment)
+ * @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
+ * that a frame can be transmitted while the queues are stopped for
+ * off-channel operation.
* @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
* used to indicate that a pending frame requires TX processing before
* it can be sent out.
@@ -319,9 +462,9 @@ struct ieee80211_bss_conf {
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
* used to indicate frame should not be encrypted
- * @IEEE80211_TX_CTL_PSPOLL_RESPONSE: (internal?)
- * This frame is a response to a PS-poll frame and should be sent
- * although the station is in powersave mode.
+ * @IEEE80211_TX_CTL_NO_PS_BUFFER: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD) or a non-bufferable MMPDU and must
+ * be sent although the station is in powersave mode.
* @IEEE80211_TX_CTL_MORE_FRAMES: More frames will be passed to the
* transmit function after the current frame, this can be used
* by drivers to kick the DMA queue only if unset or when the
@@ -329,19 +472,43 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted
* after TX status because the destination was asleep, it must not
* be modified again (no seqno assignment, crypto, etc.)
- * @IEEE80211_TX_INTFL_HAS_RADIOTAP: This frame was injected and still
- * has a radiotap header at skb->data.
+ * @IEEE80211_TX_INTFL_MLME_CONN_TX: This frame was transmitted by the MLME
+ * code for connection establishment, this indicates that its status
+ * should kick the MLME state machine.
* @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211
* MLME command (internal to mac80211 to figure out whether to send TX
* status to user space)
* @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
* @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
* frame and selects the maximum number of streams that it can use.
+ * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on
+ * the off-channel channel when a remain-on-channel offload is done
+ * in hardware -- normal packets still flow and are expected to be
+ * handled properly by the device.
+ * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
+ * testing. It will be sent out with incorrect Michael MIC key to allow
+ * TKIP countermeasures to be tested.
+ * @IEEE80211_TX_CTL_NO_CCK_RATE: This frame will be sent at non CCK rate.
+ * This flag is actually used for management frames, especially for P2P
+ * frames not being sent at CCK rate in 2GHz band.
+ * @IEEE80211_TX_STATUS_EOSP: This packet marks the end of service period,
+ * when its status is reported the service period ends. For frames in
+ * an SP that mac80211 transmits, it is already set; for driver frames
+ * the driver may set this flag. It is also used to do the same for
+ * PS-Poll responses.
+ * @IEEE80211_TX_CTL_USE_MINRATE: This frame will be sent at lowest rate.
+ * This flag is used to send nullfunc frame at minimum rate when
+ * the nullfunc is used for connection monitoring purpose.
+ * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it
+ * would be fragmented by size (this is optional, only used for
+ * monitor injection).
+ * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD).
*
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
*/
-enum mac80211_tx_control_flags {
+enum mac80211_tx_info_flags {
IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1),
IEEE80211_TX_CTL_NO_ACK = BIT(2),
@@ -355,20 +522,40 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_STAT_AMPDU = BIT(10),
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
+ IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
- IEEE80211_TX_CTL_PSPOLL_RESPONSE = BIT(17),
+ IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
IEEE80211_TX_CTL_MORE_FRAMES = BIT(18),
IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19),
- IEEE80211_TX_INTFL_HAS_RADIOTAP = BIT(20),
+ IEEE80211_TX_INTFL_MLME_CONN_TX = BIT(20),
IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21),
IEEE80211_TX_CTL_LDPC = BIT(22),
IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
+ IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25),
+ IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26),
+ IEEE80211_TX_CTL_NO_CCK_RATE = BIT(27),
+ IEEE80211_TX_STATUS_EOSP = BIT(28),
+ IEEE80211_TX_CTL_USE_MINRATE = BIT(29),
+ IEEE80211_TX_CTL_DONTFRAG = BIT(30),
+ IEEE80211_TX_CTL_PS_RESPONSE = BIT(31),
};
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+/**
+ * enum mac80211_tx_control_flags - flags to describe transmit control
+ *
+ * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
+ * protocol frame (e.g. EAP)
+ *
+ * These flags are used in tx_info->control.flags.
+ */
+enum mac80211_tx_control_flags {
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+};
+
/*
* This definition is used as a mask to clear all temporary flags, which are
* set by the tx handlers for each transmission attempt by the mac80211 stack.
@@ -378,9 +565,9 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \
IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \
IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \
- IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_PSPOLL_RESPONSE | \
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_NO_PS_BUFFER | \
IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \
- IEEE80211_TX_CTL_STBC)
+ IEEE80211_TX_CTL_STBC | IEEE80211_TX_STATUS_EOSP)
/**
* enum mac80211_rate_control_flags - per-rate flags set by the
@@ -394,9 +581,14 @@ enum mac80211_tx_control_flags {
* This is set if the current BSS requires ERP protection.
* @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
* @IEEE80211_TX_RC_MCS: HT rate.
+ * @IEEE80211_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is split
+ * into a higher 4 bits (Nss) and lower 4 bits (MCS number)
* @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in
* Greenfield mode.
* @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz.
+ * @IEEE80211_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission
+ * @IEEE80211_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission
+ * (80+80 isn't supported yet)
* @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the
* adjacent 20 MHz channels, if the current channel type is
* NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
@@ -407,12 +599,15 @@ enum mac80211_rate_control_flags {
IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1),
IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(2),
- /* rate index is an MCS rate number instead of an index */
+ /* rate index is an HT/VHT MCS instead of an index */
IEEE80211_TX_RC_MCS = BIT(3),
IEEE80211_TX_RC_GREEN_FIELD = BIT(4),
IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(5),
IEEE80211_TX_RC_DUP_DATA = BIT(6),
IEEE80211_TX_RC_SHORT_GI = BIT(7),
+ IEEE80211_TX_RC_VHT_MCS = BIT(8),
+ IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(9),
+ IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(10),
};
@@ -423,7 +618,10 @@ enum mac80211_rate_control_flags {
#define IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE 24
/* maximum number of rate stages */
-#define IEEE80211_TX_MAX_RATES 5
+#define IEEE80211_TX_MAX_RATES 4
+
+/* maximum number of rate table entries */
+#define IEEE80211_TX_RATE_TABLE_SIZE 4
/**
* struct ieee80211_tx_rate - rate selection/status
@@ -455,10 +653,32 @@ enum mac80211_rate_control_flags {
*/
struct ieee80211_tx_rate {
s8 idx;
- u8 count;
- u8 flags;
+ u16 count:5,
+ flags:11;
} __packed;
+#define IEEE80211_MAX_TX_RETRY 31
+
+static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
+ u8 mcs, u8 nss)
+{
+ WARN_ON(mcs & ~0xF);
+ WARN_ON((nss - 1) & ~0x7);
+ rate->idx = ((nss - 1) << 4) | mcs;
+}
+
+static inline u8
+ieee80211_rate_get_vht_mcs(const struct ieee80211_tx_rate *rate)
+{
+ return rate->idx & 0xF;
+}
+
+static inline u8
+ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
+{
+ return (rate->idx >> 4) + 1;
+}
+
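When %IEEE80211_TX_RC_VHT_MCS is set, idx is no longer a rate-table index: the helpers above pack Nss-1 into the high nibble and the MCS into the low nibble. A standalone sketch that duplicates the same shift/mask so it compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>

/* idx layout for VHT rates: high nibble = Nss - 1, low nibble = MCS */
static int8_t  vht_idx(uint8_t mcs, uint8_t nss) { return (int8_t)(((nss - 1) << 4) | (mcs & 0xF)); }
static uint8_t vht_mcs(int8_t idx)               { return idx & 0xF; }
static uint8_t vht_nss(int8_t idx)               { return (idx >> 4) + 1; }

int main(void)
{
	int8_t idx = vht_idx(7, 2);	/* MCS 7, 2 spatial streams */

	printf("idx=0x%02x mcs=%u nss=%u\n", (uint8_t)idx, vht_mcs(idx), vht_nss(idx));
	return 0;
}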
/**
* struct ieee80211_tx_info - skb transmit information
*
@@ -467,13 +687,10 @@ struct ieee80211_tx_rate {
* (2) driver internal use (if applicable)
* (3) TX status information - driver tells mac80211 what happened
*
- * The TX control's sta pointer is only valid during the ->tx call,
- * it may be NULL.
- *
* @flags: transmit info flags, defined above
* @band: the band to transmit on (use for checking for races)
- * @antenna_sel_tx: antenna to use, 0 for automatic diversity
- * @pad: padding, ignore
+ * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
+ * @ack_frame_id: internal frame ID for TX status, used internally
* @control: union for control data
* @status: union for status data
* @driver_data: array of driver_data pointers
@@ -488,10 +705,9 @@ struct ieee80211_tx_info {
u32 flags;
u8 band;
- u8 antenna_sel_tx;
+ u8 hw_queue;
- /* 2 byte hole */
- u8 pad[2];
+ u16 ack_frame_id;
union {
struct {
@@ -501,6 +717,11 @@ struct ieee80211_tx_info {
struct ieee80211_tx_rate rates[
IEEE80211_TX_MAX_RATES];
s8 rts_cts_rate_idx;
+ u8 use_rts:1;
+ u8 use_cts_prot:1;
+ u8 short_preamble:1;
+ u8 skip_table:1;
+ /* 2 bytes free */
};
/* only needed before rate control */
unsigned long jiffies;
@@ -508,18 +729,22 @@ struct ieee80211_tx_info {
/* NB: vif can be NULL for injected frames */
struct ieee80211_vif *vif;
struct ieee80211_key_conf *hw_key;
- struct ieee80211_sta *sta;
+ u32 flags;
+ /* 4 bytes free */
} control;
struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
+ s32 ack_signal;
u8 ampdu_ack_len;
- int ack_signal;
u8 ampdu_len;
- /* 15 bytes free */
+ u8 antenna;
+ void *status_driver_data[21 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
IEEE80211_TX_MAX_RATES];
+ u8 pad[4];
+
void *rate_driver_data[
IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE / sizeof(void *)];
};
@@ -528,6 +753,21 @@ struct ieee80211_tx_info {
};
};
+/**
+ * struct ieee80211_sched_scan_ies - scheduled scan IEs
+ *
+ * This structure is used to pass the appropriate IEs to be used in scheduled
+ * scans for all bands. It contains both the IEs passed from the userspace
+ * and the ones generated by mac80211.
+ *
+ * @ie: array with the IEs for each supported band
+ * @len: array with the total length of the IEs for each band
+ */
+struct ieee80211_sched_scan_ies {
+ u8 *ie[IEEE80211_NUM_BANDS];
+ size_t len[IEEE80211_NUM_BANDS];
+};
+
static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
{
return (struct ieee80211_tx_info *)skb->cb;
@@ -567,7 +807,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
info->status.rates[i].count = 0;
BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
+ offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
memset(&info->status.ampdu_ack_len, 0,
sizeof(struct ieee80211_tx_info) -
offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
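The BUILD_BUG_ON above is a compile-time guard that the status layout has not shifted underneath the memset() that follows it, since that memset() computes what to clear from field offsets. The same layout-pinning idea in portable C, with a toy struct; the offsets below belong to this sketch, not to ieee80211_tx_info:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct tx_status {		/* toy stand-in for the status union member */
	uint64_t rates;		/* pretend: packed rate entries */
	int32_t  ack_signal;
	uint8_t  ampdu_ack_len;
	uint8_t  ampdu_len;
};

struct tx_info {
	uint32_t flags;
	uint8_t  band;
	uint8_t  hw_queue;
	uint16_t ack_frame_id;
	struct tx_status status;
};

/* same guard as BUILD_BUG_ON: if a field moves, fail the build instead of
 * letting a later memset() clear the wrong bytes */
static_assert(offsetof(struct tx_info, status.ack_signal) ==
	      offsetof(struct tx_info, status) + sizeof(uint64_t),
	      "tx_info layout changed");

int main(void) { return 0; }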
@@ -590,26 +830,97 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
* the frame.
- * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
- * is valid. This is useful in monitor mode and necessary for beacon frames
- * to enable IBSS merging.
+ * @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the first symbol of the MPDU
+ * was received. This is useful in monitor mode and for proper IBSS
+ * merging.
+ * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the last symbol of the MPDU
+ * (including FCS) was received.
* @RX_FLAG_SHORTPRE: Short preamble was used for this frame
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
+ * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
* @RX_FLAG_40MHZ: HT40 (40 MHz) was used
* @RX_FLAG_SHORT_GI: Short guard interval was used
+ * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
+ * Valid only for data frames (mainly A-MPDU)
+ * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
+ * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
+ * to hw.radiotap_mcs_details to advertise that fact
+ * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
+ * number (@ampdu_reference) must be populated and be a distinct number for
+ * each A-MPDU
+ * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
+ * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
+ * monitoring purposes only
+ * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
+ * subframes of a single A-MPDU
+ * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
+ * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
+ * on this subframe
+ * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
+ * is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_LDPC: LDPC was used
+ * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
+ * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
+ * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
+ * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
+ * subframes instead of one huge frame for performance reasons.
+ * All but the last MSDU from an A-MSDU should have this flag set. E.g.
+ * if an A-MSDU has 3 frames, the first 2 must have the flag set, while
+ * the 3rd (last) one must not have this flag set. The flag is used to
+ * deal with retransmission/duplication recovery properly since A-MSDU
+ * subframes share the same sequence number. Reported subframes can be
+ * either regular MSDUs or single A-MSDUs. Subframes must not be
+ * interleaved with other frames.
*/
enum mac80211_rx_flags {
- RX_FLAG_MMIC_ERROR = 1<<0,
- RX_FLAG_DECRYPTED = 1<<1,
- RX_FLAG_MMIC_STRIPPED = 1<<3,
- RX_FLAG_IV_STRIPPED = 1<<4,
- RX_FLAG_FAILED_FCS_CRC = 1<<5,
- RX_FLAG_FAILED_PLCP_CRC = 1<<6,
- RX_FLAG_TSFT = 1<<7,
- RX_FLAG_SHORTPRE = 1<<8,
- RX_FLAG_HT = 1<<9,
- RX_FLAG_40MHZ = 1<<10,
- RX_FLAG_SHORT_GI = 1<<11,
+ RX_FLAG_MMIC_ERROR = BIT(0),
+ RX_FLAG_DECRYPTED = BIT(1),
+ RX_FLAG_MMIC_STRIPPED = BIT(3),
+ RX_FLAG_IV_STRIPPED = BIT(4),
+ RX_FLAG_FAILED_FCS_CRC = BIT(5),
+ RX_FLAG_FAILED_PLCP_CRC = BIT(6),
+ RX_FLAG_MACTIME_START = BIT(7),
+ RX_FLAG_SHORTPRE = BIT(8),
+ RX_FLAG_HT = BIT(9),
+ RX_FLAG_40MHZ = BIT(10),
+ RX_FLAG_SHORT_GI = BIT(11),
+ RX_FLAG_NO_SIGNAL_VAL = BIT(12),
+ RX_FLAG_HT_GF = BIT(13),
+ RX_FLAG_AMPDU_DETAILS = BIT(14),
+ RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15),
+ RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16),
+ RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
+ RX_FLAG_AMPDU_IS_LAST = BIT(18),
+ RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
+ RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20),
+ RX_FLAG_MACTIME_END = BIT(21),
+ RX_FLAG_VHT = BIT(22),
+ RX_FLAG_LDPC = BIT(23),
+ RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
+ RX_FLAG_10MHZ = BIT(28),
+ RX_FLAG_5MHZ = BIT(29),
+ RX_FLAG_AMSDU_MORE = BIT(30),
+};
+
+#define RX_FLAG_STBC_SHIFT 26
+
+/**
+ * enum mac80211_rx_vht_flags - receive VHT flags
+ *
+ * These flags are used with the @vht_flag member of
+ * &struct ieee80211_rx_status.
+ * @RX_VHT_FLAG_80MHZ: 80 MHz was used
+ * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used
+ * @RX_VHT_FLAG_160MHZ: 160 MHz was used
+ * @RX_VHT_FLAG_BF: packet was beamformed
+ */
+enum mac80211_rx_vht_flags {
+ RX_VHT_FLAG_80MHZ = BIT(0),
+ RX_VHT_FLAG_80P80MHZ = BIT(1),
+ RX_VHT_FLAG_160MHZ = BIT(2),
+ RX_VHT_FLAG_BF = BIT(3),
};
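RX_FLAG_STBC_MASK spans bits 26-27 and encodes the number of space-time streams directly, so the value is recovered with a mask and the RX_FLAG_STBC_SHIFT define. A short userspace equivalent, with the constants copied from above:

#include <stdint.h>
#include <stdio.h>

#define RX_FLAG_STBC_SHIFT	26
#define RX_FLAG_STBC_MASK	((1u << 26) | (1u << 27))

static unsigned int rx_stbc_streams(uint32_t flag)
{
	return (flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;	/* 0 = no STBC, 1..3 = Nss */
}

int main(void)
{
	uint32_t flag = 2u << RX_FLAG_STBC_SHIFT;	/* driver reported STBC with Nss = 2 */

	printf("STBC streams: %u\n", rx_stbc_streams(flag));
	return 0;
}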
/**
@@ -621,26 +932,44 @@ enum mac80211_rx_flags {
*
* @mactime: value in microseconds of the 64-bit Time Synchronization Function
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
+ * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
+ * it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
* @freq: frequency the radio was tuned to when receiving this frame, in MHz
* @signal: signal strength when receiving this frame, either in dBm, in dB or
* unspecified depending on the hardware capabilities flags
* @IEEE80211_HW_SIGNAL_*
+ * @chains: bitmask of receive chains for which separate signal strength
+ * values were filled.
+ * @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't
+ * support dB or unspecified units)
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
- * HT rates are use (RX_FLAG_HT)
+ * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
+ * @vht_nss: number of streams (VHT only)
* @flag: %RX_FLAG_*
+ * @vht_flag: %RX_VHT_FLAG_*
* @rx_flags: internal RX flags for mac80211
+ * @ampdu_reference: A-MPDU reference number, must be a different value for
+ * each A-MPDU but the same for each subframe within one A-MPDU
+ * @ampdu_delimiter_crc: A-MPDU delimiter CRC
*/
struct ieee80211_rx_status {
u64 mactime;
- enum ieee80211_band band;
- int freq;
- int signal;
- int antenna;
- int rate_idx;
- int flag;
- unsigned int rx_flags;
+ u32 device_timestamp;
+ u32 ampdu_reference;
+ u32 flag;
+ u16 freq;
+ u8 vht_flag;
+ u8 rate_idx;
+ u8 vht_nss;
+ u8 rx_flags;
+ u8 band;
+ u8 antenna;
+ s8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 ampdu_delimiter_crc;
};
/**
@@ -685,6 +1014,8 @@ enum ieee80211_conf_flags {
* @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
* @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
* @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
+ * Note that this is only valid if channel contexts are not used,
+ * otherwise each channel context has the number of chains listed.
*/
enum ieee80211_conf_changed {
IEEE80211_CONF_CHANGE_SMPS = BIT(1),
@@ -736,21 +1067,24 @@ enum ieee80211_smps_mode {
* powersave documentation below. This variable is valid only when
* the CONF_PS flag is set.
*
- * @power_level: requested transmit power (in dBm)
+ * @power_level: requested transmit power (in dBm), backward compatibility
+ * value only that is set to the minimum of all interfaces
*
- * @channel: the channel to tune to
- * @channel_type: the channel (HT) type
+ * @chandef: the channel definition to tune to
+ * @radar_enabled: whether radar detection is enabled
*
* @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
- * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
- * but actually means the number of transmissions not the number of retries
+ * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
+ * but actually means the number of transmissions not the number of retries
* @short_frame_max_tx_count: Maximum number of transmissions for a "short"
- * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
- * number of transmissions not the number of retries
+ * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
+ * number of transmissions not the number of retries
*
* @smps_mode: spatial multiplexing powersave mode; note that
* %IEEE80211_SMPS_STATIC is used when the device is not
- * configured for an HT channel
+ * configured for an HT channel.
+ * Note that this is only valid if channel contexts are not used,
+ * otherwise each channel context has the number of chains listed.
*/
struct ieee80211_conf {
u32 flags;
@@ -762,8 +1096,8 @@ struct ieee80211_conf {
u8 long_frame_max_tx_count, short_frame_max_tx_count;
- struct ieee80211_channel *channel;
- enum nl80211_channel_type channel_type;
+ struct cfg80211_chan_def chandef;
+ bool radar_enabled;
enum ieee80211_smps_mode smps_mode;
};
@@ -779,17 +1113,32 @@ struct ieee80211_conf {
* the driver passed into mac80211.
* @block_tx: Indicates whether transmission must be blocked before the
* scheduled channel switch, as indicated by the AP.
- * @channel: the new channel to switch to
+ * @chandef: the new channel to switch to
* @count: the number of TBTT's until the channel switch event
*/
struct ieee80211_channel_switch {
u64 timestamp;
bool block_tx;
- struct ieee80211_channel *channel;
+ struct cfg80211_chan_def chandef;
u8 count;
};
/**
+ * enum ieee80211_vif_flags - virtual interface flags
+ *
+ * @IEEE80211_VIF_BEACON_FILTER: the device performs beacon filtering
+ * on this virtual interface to avoid unnecessary CPU wakeups
+ * @IEEE80211_VIF_SUPPORTS_CQM_RSSI: the device can do connection quality
+ * monitoring on this virtual interface -- i.e. it can monitor
+ * connection quality related parameters, such as the RSSI level and
+ * provide notifications if configured trigger levels are reached.
+ */
+enum ieee80211_vif_flags {
+ IEEE80211_VIF_BEACON_FILTER = BIT(0),
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
+};
+
+/**
* struct ieee80211_vif - per-interface data
*
* Data in this structure is continually present for driver
@@ -801,6 +1150,23 @@ struct ieee80211_channel_switch {
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
+ * @csa_active: marks whether a channel switch is going on. Internally it is
+ * write-protected by sdata_lock and local->mtx so holding either is fine
+ * for read access.
+ * @driver_flags: flags/capabilities the driver has for this interface,
+ * these need to be set (or cleared) when the interface is added
+ * or, if supported by the driver, the interface type is changed
+ * at runtime, mac80211 will never touch this field
+ * @hw_queue: hardware queue for each AC
+ * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
+ * @chanctx_conf: The channel context this interface is assigned to, or %NULL
+ * when it is not assigned. This pointer is RCU-protected due to the TX
+ * path needing to access it; even though the netdev carrier will always
+ * be off when it is %NULL there can still be races and packets could be
+ * processed after it switches back to %NULL.
+ * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
+ * interface debug files. Note that it will be NULL for the virtual
+ * monitor interface (if that is requested.)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
*/
@@ -809,8 +1175,21 @@ struct ieee80211_vif {
struct ieee80211_bss_conf bss_conf;
u8 addr[ETH_ALEN];
bool p2p;
+ bool csa_active;
+
+ u8 cab_queue;
+ u8 hw_queue[IEEE80211_NUM_ACS];
+
+ struct ieee80211_chanctx_conf __rcu *chanctx_conf;
+
+ u32 driver_flags;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
+
/* must be last */
- u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
+ u8 drv_priv[0] __aligned(sizeof(void *));
};
static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
@@ -822,13 +1201,24 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
}
/**
+ * wdev_to_ieee80211_vif - return a vif struct from a wdev
+ * @wdev: the wdev to get the vif for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that get a wdev.
+ *
+ * Note that this function may return %NULL if the given wdev isn't
+ * associated with a vif that the driver knows about (e.g. monitor
+ * or AP_VLAN interfaces.)
+ */
+struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
+
+/**
* enum ieee80211_key_flags - key flags
*
* These flags are used for communication about keys between the driver
* and mac80211, with the @flags parameter of &struct ieee80211_key_conf.
*
- * @IEEE80211_KEY_FLAG_WMM_STA: Set by mac80211, this flag indicates
- * that the STA this key will be used with could be using QoS.
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
* particular key.
@@ -837,16 +1227,32 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
* generation in software.
* @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
* that the key is pairwise rather then a shared key.
- * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
+ * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
* CCMP key if it requires CCMP encryption of management frames (MFP) to
* be done in software.
+ * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
+ * if space should be prepared for the IV, but the IV
+ * itself should not be generated. Do not set together with
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
+ * management frames. The flag can help drivers that have a hardware
+ * crypto implementation that doesn't deal with management frames
+ * properly by allowing them to not upload the keys to hardware and
+ * fall back to software crypto. Note that this flag deals only with
+ * RX, if your crypto engine can't deal with TX you can also set the
+ * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
+ * driver for a CCMP key to indicate that it requires IV generation
+ * only for management frames (MFP).
*/
enum ieee80211_key_flags {
- IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
- IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1,
- IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
- IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
- IEEE80211_KEY_FLAG_SW_MGMT = 1<<4,
+ IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
+ IEEE80211_KEY_FLAG_GENERATE_IV = BIT(1),
+ IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(2),
+ IEEE80211_KEY_FLAG_PAIRWISE = BIT(3),
+ IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(4),
+ IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
+ IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
};
/**
@@ -882,6 +1288,36 @@ struct ieee80211_key_conf {
};
/**
+ * struct ieee80211_cipher_scheme - cipher scheme
+ *
+ * This structure contains cipher scheme information defining
+ * the secure packet crypto handling.
+ *
+ * @cipher: a cipher suite selector
+ * @iftype: a cipher iftype bit mask indicating an allowed cipher usage
+ * @hdr_len: the length of the security header used by the cipher
+ * @pn_len: a length of a packet number in the security header
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: a bit shift needed to get key_idx
+ * key_idx value calculation:
+ * (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift
+ * @mic_len: a mic length in bytes
+ */
+struct ieee80211_cipher_scheme {
+ u32 cipher;
+ u16 iftype;
+ u8 hdr_len;
+ u8 pn_len;
+ u8 pn_off;
+ u8 key_idx_off;
+ u8 key_idx_mask;
+ u8 key_idx_shift;
+ u8 mic_len;
+};
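A minimal sketch of how a driver might describe one scheme. The suite selector and all field values below are purely illustrative (a made-up vendor cipher with a 4-byte PN at the start of an 8-byte header, the key index in bits 6-7 of byte 3, and an 8-byte MIC).

static const struct ieee80211_cipher_scheme mydrv_cipher_schemes[] = {
	{
		.cipher        = 0x001234ff,	/* illustrative OUI-based selector */
		.iftype        = BIT(NL80211_IFTYPE_STATION),
		.hdr_len       = 8,
		.pn_len        = 4,
		.pn_off        = 0,
		.key_idx_off   = 3,
		.key_idx_mask  = 0xc0,
		.key_idx_shift = 6,
		.mic_len       = 8,
	},
};

/* before ieee80211_register_hw():
 *	hw->cipher_schemes = mydrv_cipher_schemes;
 *	hw->n_cipher_schemes = ARRAY_SIZE(mydrv_cipher_schemes);
 */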
+
+/**
* enum set_key_cmd - key command
*
* Used with the set_key() callback in &struct ieee80211_ops, this
@@ -895,6 +1331,61 @@ enum set_key_cmd {
};
/**
+ * enum ieee80211_sta_state - station state
+ *
+ * @IEEE80211_STA_NOTEXIST: station doesn't exist at all,
+ * this is a special state for add/remove transitions
+ * @IEEE80211_STA_NONE: station exists without special state
+ * @IEEE80211_STA_AUTH: station is authenticated
+ * @IEEE80211_STA_ASSOC: station is associated
+ * @IEEE80211_STA_AUTHORIZED: station is authorized (802.1X)
+ */
+enum ieee80211_sta_state {
+ /* NOTE: These need to be ordered correctly! */
+ IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE,
+ IEEE80211_STA_AUTH,
+ IEEE80211_STA_ASSOC,
+ IEEE80211_STA_AUTHORIZED,
+};
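These states are typically consumed through the @sta_state callback documented further below; the sketch here (with hypothetical mydrv_* helpers) adds the station to the device on the NOTEXIST to NONE transition and removes it on the way back down, leaving the other transitions alone.

static int mydrv_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   enum ieee80211_sta_state old_state,
			   enum ieee80211_sta_state new_state)
{
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mydrv_add_sta(hw->priv, vif, sta);	/* hypothetical */

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mydrv_remove_sta(hw->priv, sta);		/* hypothetical */

	/* no device programming needed for the other transitions */
	return 0;
}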
+
+/**
+ * enum ieee80211_sta_rx_bandwidth - station RX bandwidth
+ * @IEEE80211_STA_RX_BW_20: station can only receive 20 MHz
+ * @IEEE80211_STA_RX_BW_40: station can receive up to 40 MHz
+ * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz
+ * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz
+ * (including 80+80 MHz)
+ *
+ * Implementation note: 20 must be zero so that a zero-initialized
+ * field defaults to it correctly, and the values must be sorted.
+ */
+enum ieee80211_sta_rx_bandwidth {
+ IEEE80211_STA_RX_BW_20 = 0,
+ IEEE80211_STA_RX_BW_40,
+ IEEE80211_STA_RX_BW_80,
+ IEEE80211_STA_RX_BW_160,
+};
+
+/**
+ * struct ieee80211_sta_rates - station rate selection table
+ *
+ * @rcu_head: RCU head used for freeing the table on update
+ * @rate: transmit rates/flags to be used by default.
+ * Entries can be overridden per packet via the TX control data in the skb cb.
+ */
+struct ieee80211_sta_rates {
+ struct rcu_head rcu_head;
+ struct {
+ s8 idx;
+ u8 count;
+ u8 count_cts;
+ u8 count_rts;
+ u16 flags;
+ } rate[IEEE80211_TX_RATE_TABLE_SIZE];
+};
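Since @rates is RCU-protected, a driver's TX path would typically dereference it inside an RCU read-side critical section, roughly as in this sketch (an entry with a negative idx is treated as the end of the table):

	struct ieee80211_sta_rates *tbl;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(sta->rates);
	if (tbl) {
		for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
			if (tbl->rate[i].idx < 0)
				break;
			/* program tbl->rate[i].idx / count / flags into the
			 * device's per-station rate table here */
		}
	}
	rcu_read_unlock();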
+
+/**
* struct ieee80211_sta - station table entry
*
* A station table entry represents a station we are possibly
@@ -907,18 +1398,40 @@ enum set_key_cmd {
* @addr: MAC address
* @aid: AID we assigned to the station if we're an AP
* @supp_rates: Bitmap of supported rates (per band)
- * @ht_cap: HT capabilities of this STA; restricted to our own TX capabilities
+ * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
+ * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @wme: indicates whether the STA supports WME. Only valid in AP mode.
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
+ * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
+ * if wme is supported.
+ * @max_sp: max Service Period. Only valid if wme is supported.
+ * @bandwidth: current bandwidth the station can receive with
+ * @rx_nss: in HT/VHT, the maximum number of spatial streams the
+ * station can receive at the moment, changed by operating mode
+ * notifications and capabilities. The value is only valid after
+ * the station moves to associated state.
+ * @smps_mode: current SMPS mode (off, static or dynamic)
+ * @rates: rate control selection table
+ * @tdls: indicates whether the STA is a TDLS peer
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
u8 addr[ETH_ALEN];
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ bool wme;
+ u8 uapsd_queues;
+ u8 max_sp;
+ u8 rx_nss;
+ enum ieee80211_sta_rx_bandwidth bandwidth;
+ enum ieee80211_smps_mode smps_mode;
+ struct ieee80211_sta_rates __rcu *rates;
+ bool tdls;
/* must be last */
- u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
+ u8 drv_priv[0] __aligned(sizeof(void *));
};
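The @drv_priv area is sized by the driver through hw->sta_data_size, so per-station driver state can live inline with the mac80211 entry; a minimal sketch, assuming a hypothetical struct mydrv_sta:

struct mydrv_sta {
	u8 hw_sta_id;
	/* ... whatever per-station state the device needs ... */
};

/* at init time, before ieee80211_register_hw(): */
	hw->sta_data_size = sizeof(struct mydrv_sta);

/* in any callback that receives a station pointer: */
	struct mydrv_sta *msta = (struct mydrv_sta *)sta->drv_priv;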
/**
@@ -935,18 +1448,13 @@ enum sta_notify_cmd {
};
/**
- * enum ieee80211_tkip_key_type - get tkip key
+ * struct ieee80211_tx_control - TX control data
*
- * Used by drivers which need to get a tkip key for skb. Some drivers need a
- * phase 1 key, others need a phase 2 key. A single function allows the driver
- * to get the key, this enum indicates what type of key is required.
- *
- * @IEEE80211_TKIP_P1_KEY: the driver needs a phase 1 key
- * @IEEE80211_TKIP_P2_KEY: the driver needs a phase 2 key
+ * @sta: station table entry, this sta pointer may be NULL and
+ * it is not allowed to copy the pointer, due to RCU.
*/
-enum ieee80211_tkip_key_type {
- IEEE80211_TKIP_P1_KEY,
- IEEE80211_TKIP_P2_KEY,
+struct ieee80211_tx_control {
+ struct ieee80211_sta *sta;
};
/**
@@ -1020,10 +1528,6 @@ enum ieee80211_tkip_key_type {
* @IEEE80211_HW_MFP_CAPABLE:
* Hardware supports management frame protection (MFP, IEEE 802.11w).
*
- * @IEEE80211_HW_BEACON_FILTER:
- * Hardware supports dropping of irrelevant beacon frames to
- * avoid waking up cpu.
- *
* @IEEE80211_HW_SUPPORTS_STATIC_SMPS:
* Hardware supports static spatial multiplexing powersave,
* ie. can turn off all but one chain even on HT connections
@@ -1044,19 +1548,12 @@ enum ieee80211_tkip_key_type {
* the stack.
*
* @IEEE80211_HW_CONNECTION_MONITOR:
- * The hardware performs its own connection monitoring, including
- * periodic keep-alives to the AP and probing the AP on beacon loss.
- * When this flag is set, signaling beacon-loss will cause an immediate
- * change to disassociated state.
- *
- * @IEEE80211_HW_SUPPORTS_CQM_RSSI:
- * Hardware can do connection quality monitoring - i.e. it can monitor
- * connection quality related parameters, such as the RSSI level and
- * provide notifications if configured trigger levels are reached.
+ * The hardware performs its own connection monitoring, including
+ * periodic keep-alives to the AP and probing the AP on beacon loss.
*
- * @IEEE80211_HW_NEED_DTIM_PERIOD:
- * This device needs to know the DTIM period for the BSS before
- * associating.
+ * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
+ * This device needs to get data from beacon before association (i.e.
+ * dtim_period).
*
* @IEEE80211_HW_SUPPORTS_PER_STA_GTK: The device's crypto engine supports
* per-station GTKs as used by IBSS RSN or during fast transition. If
@@ -1064,6 +1561,51 @@ enum ieee80211_tkip_key_type {
* to decrypt group addressed frames, then IBSS RSN support is still
* possible but software crypto will be used. Advertise the wiphy flag
* only in that case.
+ *
+ * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
+ * autonomously manages the PS status of connected stations. When
+ * this flag is set mac80211 will not trigger PS mode for connected
+ * stations based on the PM bit of incoming frames.
+ * Use ieee80211_start_ps()/ieee80211_end_ps() to manually configure
+ * the PS mode of connected stations.
+ *
+ * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session
+ * setup strictly in HW. mac80211 should not attempt to do this in
+ * software.
+ *
+ * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of
+ * a virtual monitor interface when monitor interfaces are the only
+ * active interfaces.
+ *
+ * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
+ * queue mapping in order to use different queues (not just one per AC)
+ * for different virtual interfaces. See the doc section on HW queue
+ * control for more details.
+ *
+ * @IEEE80211_HW_SUPPORTS_RC_TABLE: The driver supports using a rate
+ * selection table provided by the rate control algorithm.
+ *
+ * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
+ * P2P Interface. This will be honoured even if more than one interface
+ * is supported.
+ *
+ * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
+ * only, to allow getting TBTT of a DTIM beacon.
+ *
+ * @IEEE80211_HW_SUPPORTS_HT_CCK_RATES: Hardware supports mixing HT/CCK rates
+ * and can cope with CCK rates in an aggregation session (e.g. by not
+ * using aggregation for such frames.)
+ *
+ * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h based channel-switch (CSA)
+ * for a single active channel while using channel contexts. When support
+ * is not enabled the default action is to disconnect when getting the
+ * CSA frame.
+ *
+ * @IEEE80211_HW_CHANGE_RUNNING_CHANCTX: The hardware can change a
+ * channel context on-the-fly. This is needed for channel switch
+ * on single-channel hardware. It can also be used as an
+ * optimization in certain channel switch cases with
+ * multi-channel.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1073,21 +1615,29 @@ enum ieee80211_hw_flags {
IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
IEEE80211_HW_SIGNAL_DBM = 1<<6,
- IEEE80211_HW_NEED_DTIM_PERIOD = 1<<7,
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 1<<7,
IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
IEEE80211_HW_SUPPORTS_PS = 1<<10,
IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
- IEEE80211_HW_BEACON_FILTER = 1<<14,
+ IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
- IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20,
+ IEEE80211_HW_QUEUE_CONTROL = 1<<20,
IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
+ IEEE80211_HW_AP_LINK_PS = 1<<22,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
+ IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
+ IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
+ IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
+ IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
+ IEEE80211_HW_CHANGE_RUNNING_CHANCTX = 1<<29,
};
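Drivers advertise these capabilities by OR-ing the relevant bits into hw->flags before registration; for example, a hypothetical driver with hardware rate control and its own connection monitoring might set:

	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_HAS_RATE_CONTROL |
		    IEEE80211_HW_CONNECTION_MONITOR |
		    IEEE80211_HW_SUPPORTS_PS |
		    IEEE80211_HW_AMPDU_AGGREGATION;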
/**
@@ -1112,13 +1662,14 @@ enum ieee80211_hw_flags {
* @extra_tx_headroom: headroom to reserve in each transmit skb
* for use by the driver (e.g. for transmit headers.)
*
- * @channel_change_time: time (in microseconds) it takes to change channels.
+ * @extra_beacon_tailroom: tailroom to reserve in each beacon tx skb.
+ * Can be used by drivers to add extra IEs.
*
* @max_signal: Maximum value for signal (rssi) in RX information, used
- * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
+ * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
*
* @max_listen_interval: max listen interval in units of beacon interval
- * that HW supports
+ * that HW supports
*
* @queues: number of available hardware transmit queues for
* data packets. WMM/QoS requires at least four, these
@@ -1132,6 +1683,8 @@ enum ieee80211_hw_flags {
* within &struct ieee80211_vif.
* @sta_data_size: size (in bytes) of the drv_priv data area
* within &struct ieee80211_sta.
+ * @chanctx_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_chanctx_conf.
*
* @max_rates: maximum number of alternate rate retry stages the hw
* can handle.
@@ -1139,9 +1692,47 @@ enum ieee80211_hw_flags {
* the hw can report back.
* @max_rate_tries: maximum number of tries for each stage
*
- * @napi_weight: weight used for NAPI polling. You must specify an
- * appropriate value here if a napi_poll operation is provided
- * by your driver.
+ * @max_rx_aggregation_subframes: maximum buffer size (number of
+ * sub-frames) to be used for A-MPDU block ack receiver
+ * aggregation.
+ * This is only relevant if the device has restrictions on the
+ * number of subframes; if it relies on mac80211 to do reordering
+ * it shouldn't be set.
+ *
+ * @max_tx_aggregation_subframes: maximum number of subframes in an
+ * aggregate an HT driver will transmit, used by the peer as a
+ * hint to size its reorder buffer.
+ *
+ * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
+ * (if %IEEE80211_HW_QUEUE_CONTROL is set)
+ *
+ * @radiotap_mcs_details: lists which MCS information the HW
+ * reports; by default it is set to _MCS, _GI and _BW but doesn't
+ * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values; only
+ * adding _BW is supported today.
+ *
+ * @radiotap_vht_details: lists which VHT MCS information the HW reports,
+ * the default is _GI | _BANDWIDTH.
+ * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
+ *
+ * @netdev_features: netdev features to be set in each netdev created
+ * from this HW. Note only HW checksum features are currently
+ * compatible with mac80211. Other feature bits will be rejected.
+ *
+ * @uapsd_queues: This bitmap is included in (re)association frame to indicate
+ * for each access category if it is uAPSD trigger-enabled and delivery-
+ * enabled. Use IEEE80211_WMM_IE_STA_QOSINFO_AC_* to set this bitmap.
+ * Each bit corresponds to a different AC. A value of '1' in a specific bit
+ * means that the corresponding AC is both trigger- and delivery-enabled;
+ * '0' means neither is enabled.
+ *
+ * @uapsd_max_sp_len: maximum number of total buffered frames the WMM AP may
+ * deliver to a WMM STA during any Service Period triggered by the WMM STA.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values.
+ *
+ * @n_cipher_schemes: the number of entries in the @cipher_schemes array.
+ * @cipher_schemes: a pointer to an array of cipher scheme definitions
+ * supported by HW.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -1150,16 +1741,26 @@ struct ieee80211_hw {
void *priv;
u32 flags;
unsigned int extra_tx_headroom;
- int channel_change_time;
+ unsigned int extra_beacon_tailroom;
int vif_data_size;
int sta_data_size;
- int napi_weight;
+ int chanctx_data_size;
u16 queues;
u16 max_listen_interval;
s8 max_signal;
u8 max_rates;
u8 max_report_rates;
u8 max_rate_tries;
+ u8 max_rx_aggregation_subframes;
+ u8 max_tx_aggregation_subframes;
+ u8 offchannel_tx_hw_queue;
+ u8 radiotap_mcs_details;
+ u16 radiotap_vht_details;
+ netdev_features_t netdev_features;
+ u8 uapsd_queues;
+ u8 uapsd_max_sp_len;
+ u8 n_cipher_schemes;
+ const struct ieee80211_cipher_scheme *cipher_schemes;
};
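Beyond the flags, the remaining fields are filled in the same way before ieee80211_register_hw(); a short sketch inside a probe routine, with illustrative values (struct mydrv_vif and struct mydrv_sta are hypothetical driver types):

	hw->extra_tx_headroom = 32;	/* device-specific TX descriptor size */
	hw->queues = 4;			/* one HW queue per AC */
	hw->max_rates = 4;
	hw->max_rate_tries = 3;
	hw->max_listen_interval = 10;
	hw->vif_data_size = sizeof(struct mydrv_vif);	/* hypothetical */
	hw->sta_data_size = sizeof(struct mydrv_sta);	/* hypothetical */
	hw->uapsd_queues = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO;
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;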
/**
@@ -1172,6 +1773,8 @@ struct ieee80211_hw {
 * structure can then access it via hw->priv. Note that mac80211 drivers should
* not use wiphy_priv() to try to get their private driver structure as this
* is already used internally by mac80211.
+ *
+ * Return: The mac80211 driver hw struct of @wiphy.
*/
struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy);
@@ -1201,7 +1804,7 @@ static inline struct ieee80211_rate *
ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *c)
{
- if (WARN_ON(c->control.rates[0].idx < 0))
+ if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
return NULL;
return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
}
@@ -1225,6 +1828,16 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
}
/**
+ * ieee80211_free_txskb - free TX skb
+ * @hw: the hardware
+ * @skb: the skb
+ *
+ * Free a transmit skb. Use this function when some failure
+ * to transmit happened and thus status cannot be reported.
+ */
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+/**
* DOC: Hardware crypto acceleration
*
* mac80211 is capable of taking advantage of many hardware
@@ -1263,11 +1876,15 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* acceleration (i.e. iwlwifi). Those drivers should provide update_tkip_key
* handler.
* The update_tkip_key() call updates the driver with the new phase 1 key.
- * This happens everytime the iv16 wraps around (every 65536 packets). The
+ * This happens every time the iv16 wraps around (every 65536 packets). The
* set_key() call will happen only once for each key (unless the AP did
* rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
* provided by update_tkip_key only. The trigger that makes mac80211 call this
* handler is software decryption with wrap around of iv16.
+ *
+ * The set_default_unicast_key() call updates the default WEP key index
+ * configured to the hardware for WEP encryption type. This is required
+ * for devices that support offload of data packets (e.g. ARP responses).
*/
/**
@@ -1320,18 +1937,9 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS
* enabled whenever user has enabled powersave.
*
- * Some hardware need to toggle a single shared antenna between WLAN and
- * Bluetooth to facilitate co-existence. These types of hardware set
- * limitations on the use of host controlled dynamic powersave whenever there
- * is simultaneous WLAN and Bluetooth traffic. For these types of hardware, the
- * driver may request temporarily going into full power save, in order to
- * enable toggling the antenna between BT and WLAN. If the driver requests
- * disabling dynamic powersave, the @dynamic_ps_timeout value will be
- * temporarily set to zero until the driver re-enables dynamic powersave.
- *
* Driver informs U-APSD client support by enabling
* %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
- * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
+ * uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
* Nullfunc frames and stay awake until the service period has ended. To
* utilize U-APSD, dynamic powersave is disabled for voip AC and all frames
* from that AC are transmitted with powersave enabled.
@@ -1344,15 +1952,15 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* DOC: Beacon filter support
*
* Some hardware have beacon filter support to reduce host cpu wakeups
- * which will reduce system power consumption. It usuallly works so that
+ * which will reduce system power consumption. It usually works so that
* the firmware creates a checksum of the beacon but omits all constantly
* changing elements (TSF, TIM etc). Whenever the checksum changes the
* beacon is forwarded to the host, otherwise it will be just dropped. That
* way the host will only receive beacons where some relevant information
* (for example ERP protection or WMM settings) have changed.
*
- * Beacon filter support is advertised with the %IEEE80211_HW_BEACON_FILTER
- * hardware capability. The driver needs to enable beacon filter support
+ * Beacon filter support is advertised with the %IEEE80211_VIF_BEACON_FILTER
+ * interface capability. The driver needs to enable beacon filter support
* whenever power save is enabled, that is %IEEE80211_CONF_PS is set. When
* power save is enabled, the stack will not check for beacon loss and the
* driver needs to notify about loss of beacons with ieee80211_beacon_loss().
@@ -1466,6 +2074,158 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
*/
/**
+ * DOC: AP support for powersaving clients
+ *
+ * In order to implement AP and P2P GO modes, mac80211 has support for
+ * client powersaving, both "legacy" PS (PS-Poll/null data) and uAPSD.
+ * There currently is no support for sAPSD.
+ *
+ * There is one assumption that mac80211 makes, namely that a client
+ * will not poll with PS-Poll and trigger with uAPSD at the same time.
+ * Both are supported, and both can be used by the same client, but
+ * they can't be used concurrently by the same client. This simplifies
+ * the driver code.
+ *
+ * The first thing to keep in mind is that there is a flag for complete
+ * driver implementation: %IEEE80211_HW_AP_LINK_PS. If this flag is set,
+ * mac80211 expects the driver to handle most of the state machine for
+ * powersaving clients and will ignore the PM bit in incoming frames.
+ * Drivers then use ieee80211_sta_ps_transition() to inform mac80211 of
+ * stations' powersave transitions. In this mode, mac80211 also doesn't
+ * handle PS-Poll/uAPSD.
+ *
+ * In the mode without %IEEE80211_HW_AP_LINK_PS, mac80211 will check the
+ * PM bit in incoming frames for client powersave transitions. When a
+ * station goes to sleep, we will stop transmitting to it. There is,
+ * however, a race condition: a station might go to sleep while there is
+ * data buffered on hardware queues. If the device has support for this
+ * it will reject frames, and the driver should give the frames back to
+ * mac80211 with the %IEEE80211_TX_STAT_TX_FILTERED flag set which will
+ * cause mac80211 to retry the frame when the station wakes up. The
+ * driver is also notified of powersave transitions by calling its
+ * @sta_notify callback.
+ *
+ * When the station is asleep, it has three choices: it can wake up,
+ * it can PS-Poll, or it can possibly start a uAPSD service period.
+ * Waking up is implemented by simply transmitting all buffered (and
+ * filtered) frames to the station. This is the easiest case. When
+ * the station sends a PS-Poll or a uAPSD trigger frame, mac80211
+ * will inform the driver of this with the @allow_buffered_frames
+ * callback; this callback is optional. mac80211 will then transmit
+ * the frames as usual and set the %IEEE80211_TX_CTL_NO_PS_BUFFER
+ * on each frame. The last frame in the service period (or the only
+ * response to a PS-Poll) also has %IEEE80211_TX_STATUS_EOSP set to
+ * indicate that it ends the service period; as this frame must have
+ * TX status report it also sets %IEEE80211_TX_CTL_REQ_TX_STATUS.
+ * When TX status is reported for this frame, the service period is
+ * marked as having ended and a new one can be started by the peer.
+ *
+ * Additionally, non-bufferable MMPDUs can also be transmitted by
+ * mac80211 with the %IEEE80211_TX_CTL_NO_PS_BUFFER set in them.
+ *
+ * Another race condition can happen on some devices like iwlwifi
+ * when there are frames queued for the station and it wakes up
+ * or polls; the frames that are already queued could end up being
+ * transmitted first instead, causing reordering and/or wrong
+ * processing of the EOSP. The cause is that allowing frames to be
+ * transmitted to a certain station is out-of-band communication to
+ * the device. To allow this problem to be solved, the driver can
+ * call ieee80211_sta_block_awake() if frames are buffered when it
+ * is notified that the station went to sleep. When all these frames
+ * have been filtered (see above), it must call the function again
+ * to indicate that the station is no longer blocked.
+ *
+ * If the driver buffers frames in the driver for aggregation in any
+ * way, it must use the ieee80211_sta_set_buffered() call when it is
+ * notified of the station going to sleep to inform mac80211 of any
+ * TIDs that have frames buffered. Note that when a station wakes up
+ * this information is reset (hence the requirement to call it when
+ * informed of the station going to sleep). Then, when a service
+ * period starts for any reason, @release_buffered_frames is called
+ * with the number of frames to be released and which TIDs they are
+ * to come from. In this case, the driver is responsible for setting
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
+ * to help with this, the @more_data parameter is passed to tell the driver
+ * whether there is more data on other TIDs -- the TIDs to release frames
+ * from are ignored since mac80211 doesn't know how many frames the
+ * buffers for those TIDs contain.
+ *
+ * If the driver also implements GO mode, where absence periods may
+ * shorten service periods (or abort PS-Poll responses), it must
+ * filter those response frames except in the case of frames that
+ * are buffered in the driver -- those must remain buffered to avoid
+ * reordering. Because it is possible that no frames are released
+ * in this case, the driver must call ieee80211_sta_eosp()
+ * to indicate to mac80211 that the service period ended anyway.
+ *
+ * Finally, if frames from multiple TIDs are released from mac80211
+ * but the driver might reorder them, it must clear & set the flags
+ * appropriately (only the last frame may have %IEEE80211_TX_STATUS_EOSP)
+ * and also take care of the EOSP and MORE_DATA bits in the frame.
+ * The driver may also use ieee80211_sta_eosp() in this case.
+ *
+ * Note that if the driver ever buffers frames other than QoS-data
+ * frames, it must take care to never send a non-QoS-data frame as
+ * the last frame in a service period, adding a QoS-nulldata frame
+ * after a non-QoS-data frame if needed.
+ */
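For the %IEEE80211_HW_AP_LINK_PS case described above, the driver reports sleep/wake transitions itself from its RX path; a minimal sketch, assuming a hypothetical firmware notification handler and a hypothetical struct mydrv_priv that keeps the AP vif pointer:

static void mydrv_ps_notify(struct mydrv_priv *priv,
			    const u8 *addr, bool sleeping)
{
	struct ieee80211_sta *sta;

	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, addr);
	if (sta)
		ieee80211_sta_ps_transition(sta, sleeping);
	rcu_read_unlock();
}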
+
+/**
+ * DOC: HW queue control
+ *
+ * Before HW queue control was introduced, mac80211 only had a single static
+ * assignment of per-interface AC software queues to hardware queues. This
+ * was problematic for a few reasons:
+ * 1) off-channel transmissions might get stuck behind other frames
+ * 2) multiple virtual interfaces couldn't be handled correctly
+ * 3) after-DTIM frames could get stuck behind other frames
+ *
+ * To solve this, hardware typically uses multiple different queues for all
+ * the different usages, and this needs to be propagated into mac80211 so it
+ * won't have the same problem with the software queues.
+ *
+ * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability
+ * flag that tells it that the driver implements its own queue control. To do
+ * so, the driver will set up the various queues in each &struct ieee80211_vif
+ * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will
+ * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and
+ * if necessary will queue the frame on the right software queue that mirrors
+ * the hardware queue.
+ * Additionally, the driver has to then use these HW queue IDs for the queue
+ * management functions (ieee80211_stop_queue() et al.)
+ *
+ * The driver is free to set up the queue mappings as needed; multiple virtual
+ * interfaces may map to the same hardware queues if needed. The setup has to
+ * happen during add_interface or change_interface callbacks. For example, a
+ * driver supporting station+station and station+AP modes might decide to have
+ * 10 hardware queues to handle different scenarios:
+ *
+ * 4 AC HW queues for 1st vif: 0, 1, 2, 3
+ * 4 AC HW queues for 2nd vif: 4, 5, 6, 7
+ * after-DTIM queue for AP: 8
+ * off-channel queue: 9
+ *
+ * It would then set up the hardware like this:
+ * hw.offchannel_tx_hw_queue = 9
+ *
+ * and the first virtual interface that is added as follows:
+ * vif.hw_queue[IEEE80211_AC_VO] = 0
+ * vif.hw_queue[IEEE80211_AC_VI] = 1
+ * vif.hw_queue[IEEE80211_AC_BE] = 2
+ * vif.hw_queue[IEEE80211_AC_BK] = 3
+ * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE
+ * and the second virtual interface with 4-7.
+ *
+ * If queue 6 gets full, for example, mac80211 would only stop the second
+ * virtual interface's BE queue since virtual interface queues are per AC.
+ *
+ * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE
+ * whenever the queue is not used (i.e. the interface is not in AP mode) if the
+ * queue could potentially be shared since mac80211 will look at cab_queue when
+ * a queue is stopped/woken even if the interface is not in AP mode.
+ */
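Translating the example mapping above into code, an add_interface() implementation for the first (AP) interface might look like the sketch below; the queue numbers simply follow the layout described in the text, and the function name is hypothetical.

static int mydrv_add_interface(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	/* first vif: ACs on HW queues 0-3, after-DTIM traffic on queue 8 */
	vif->hw_queue[IEEE80211_AC_VO] = 0;
	vif->hw_queue[IEEE80211_AC_VI] = 1;
	vif->hw_queue[IEEE80211_AC_BE] = 2;
	vif->hw_queue[IEEE80211_AC_BK] = 3;

	if (vif->type == NL80211_IFTYPE_AP)
		vif->cab_queue = 8;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}

/* and once, at setup time: hw->offchannel_tx_hw_queue = 9; */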
+
+/**
* enum ieee80211_filter_flags - hardware filter flags
*
* These flags determine what the filter in hardware should be
@@ -1526,21 +2286,83 @@ enum ieee80211_filter_flags {
* calling ieee80211_start_tx_ba_cb_irqsafe, because the peer
* might receive the addBA frame and send a delBA right away!
*
- * @IEEE80211_AMPDU_RX_START: start Rx aggregation
- * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation
- * @IEEE80211_AMPDU_TX_START: start Tx aggregation
- * @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation
+ * @IEEE80211_AMPDU_RX_START: start RX aggregation
+ * @IEEE80211_AMPDU_RX_STOP: stop RX aggregation
+ * @IEEE80211_AMPDU_TX_START: start TX aggregation
* @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
+ * @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting
+ * queued packets, now unaggregated. After all packets are transmitted the
+ * driver has to call ieee80211_stop_tx_ba_cb_irqsafe().
+ * @IEEE80211_AMPDU_TX_STOP_FLUSH: stop TX aggregation and flush all packets,
+ * called when the station is removed. There's no need or reason to call
+ * ieee80211_stop_tx_ba_cb_irqsafe() in this case as mac80211 assumes the
+ * session is gone and removes the station.
+ * @IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: called when TX aggregation is stopped
+ * but the driver hasn't called ieee80211_stop_tx_ba_cb_irqsafe() yet and
+ * now the connection is dropped and the station will be removed. Drivers
+ * should clean up and drop remaining packets when this is called.
*/
enum ieee80211_ampdu_mlme_action {
IEEE80211_AMPDU_RX_START,
IEEE80211_AMPDU_RX_STOP,
IEEE80211_AMPDU_TX_START,
- IEEE80211_AMPDU_TX_STOP,
+ IEEE80211_AMPDU_TX_STOP_CONT,
+ IEEE80211_AMPDU_TX_STOP_FLUSH,
+ IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
IEEE80211_AMPDU_TX_OPERATIONAL,
};
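A driver without TX aggregation offload can still serve the RX side; this sketch shows the rough shape of an @ampdu_action handler under that assumption (the mydrv_* helpers are hypothetical). The callback's full prototype, including @buf_size, appears in &struct ieee80211_ops below.

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum ieee80211_ampdu_mlme_action action,
			      struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			      u8 buf_size)
{
	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		return mydrv_rx_agg_start(hw->priv, sta, tid, *ssn); /* hypothetical */
	case IEEE80211_AMPDU_RX_STOP:
		return mydrv_rx_agg_stop(hw->priv, sta, tid);	     /* hypothetical */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return -EOPNOTSUPP;	/* no TX aggregation offload */
	}
	return -EINVAL;
}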
/**
+ * enum ieee80211_frame_release_type - frame release reason
+ * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
+ * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to
+ * frame received on trigger-enabled AC
+ */
+enum ieee80211_frame_release_type {
+ IEEE80211_FRAME_RELEASE_PSPOLL,
+ IEEE80211_FRAME_RELEASE_UAPSD,
+};
+
+/**
+ * enum ieee80211_rate_control_changed - flags to indicate what changed
+ *
+ * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
+ * to this station changed. The actual bandwidth is in the station
+ * information -- for HT20/40 the IEEE80211_HT_CAP_SUP_WIDTH_20_40
+ * flag changes, for HT and VHT the bandwidth field changes.
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
+ * changed (in IBSS mode) due to discovering more information about
+ * the peer.
+ * @IEEE80211_RC_NSS_CHANGED: N_SS (number of spatial streams) was changed
+ * by the peer
+ */
+enum ieee80211_rate_control_changed {
+ IEEE80211_RC_BW_CHANGED = BIT(0),
+ IEEE80211_RC_SMPS_CHANGED = BIT(1),
+ IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
+ IEEE80211_RC_NSS_CHANGED = BIT(3),
+};
+
+/**
+ * enum ieee80211_roc_type - remain on channel type
+ *
+ * With the support for multi channel contexts and multi channel operations,
+ * remain on channel operations might be limited/deferred/aborted by other
+ * flows/operations which have higher priority (and vice versa).
+ * Specifying the ROC type can be used by devices to prioritize the ROC
+ * operations compared to other operations/flows.
+ *
+ * @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC.
+ * @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required
+ * for sending management frames offchannel.
+ */
+enum ieee80211_roc_type {
+ IEEE80211_ROC_TYPE_NORMAL = 0,
+ IEEE80211_ROC_TYPE_MGMT_TX,
+};
+
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -1551,11 +2373,8 @@ enum ieee80211_ampdu_mlme_action {
* skb contains the buffer starting from the IEEE 802.11 header.
* The low-level driver should send the frame out based on
* configuration in the TX control data. This handler should,
- * preferably, never fail and stop queues appropriately, more
- * importantly, however, it must never fail for A-MPDU-queues.
- * This function should return NETDEV_TX_OK except in very
- * limited cases.
- * Must be implemented and atomic.
+ * preferably, never fail and stop queues appropriately.
+ * Must be atomic.
*
* @start: Called before the first netdevice attached to the hardware
* is enabled. This should turn on the hardware and must turn on
@@ -1575,6 +2394,27 @@ enum ieee80211_ampdu_mlme_action {
* you should ensure to cancel it on this callback.
* Must be implemented and can sleep.
*
+ * @suspend: Suspend the device; mac80211 itself will quiesce before and
+ * stop transmitting and doing any other configuration, and then
+ * ask the device to suspend. This is only invoked when WoWLAN is
+ * configured, otherwise the device is deconfigured completely and
+ * reconfigured at resume time.
+ * The driver may also impose special conditions under which it
+ * wants to use the "normal" suspend (deconfigure), say if it only
+ * supports WoWLAN when the device is associated. In this case, it
+ * must return 1 from this function.
+ *
+ * @resume: If WoWLAN was configured, this indicates that mac80211 is
+ * now resuming its operation, after this the device must be fully
+ * functional again. If this returns an error, the only way out is
+ * to also unregister the device. If it returns 1, then mac80211
+ * will also go through the regular complete restart on resume.
+ *
+ * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
+ * modified. The reason is that device_set_wakeup_enable() is
+ * supposed to be called when the configuration changes, not only
+ * in suspend().
+ *
* @add_interface: Called when a netdevice attached to the hardware is
* enabled. Because it is not called for monitor mode devices, @start
* and @stop must be implemented.
@@ -1636,6 +2476,16 @@ enum ieee80211_ampdu_mlme_action {
* which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY.
* The callback must be atomic.
*
+ * @set_rekey_data: If the device supports GTK rekeying, for example while the
+ * host is suspended, it can assign this callback to retrieve the data
+ * necessary to do GTK rekeying, this is the KEK, KCK and replay counter.
+ * After rekeying was done it should (for example during resume) notify
+ * userspace of the new replay counter using ieee80211_gtk_rekey_notify().
+ *
+ * @set_default_unicast_key: Set the default (unicast) key index, useful for
+ * WEP when the device sends data packets autonomously, e.g. for ARP
+ * offloading. The index can be 0-3, or -1 for unsetting it.
+ *
* @hw_scan: Ask the hardware to service the scan request, no need to start
* the scan state machine in stack. The scan must honour the channel
* configuration done by the regulatory agent in the wiphy's
@@ -1650,6 +2500,22 @@ enum ieee80211_ampdu_mlme_action {
* any error unless this callback returned a negative error code.
* The callback can sleep.
*
+ * @cancel_hw_scan: Ask the low-level driver to cancel the active hw scan.
+ * The driver should ask the hardware to cancel the scan (if possible),
+ * but the scan will be completed only after the driver calls
+ * ieee80211_scan_completed().
+ * This callback is needed for wowlan, to prevent enqueueing a new
+ * scan_work after the low-level driver was already suspended.
+ * The callback can sleep.
+ *
+ * @sched_scan_start: Ask the hardware to start scanning repeatedly at
+ * specific intervals. The driver must call the
+ * ieee80211_sched_scan_results() function whenever it finds results.
+ * This process will continue until sched_scan_stop is called.
+ *
+ * @sched_scan_stop: Tell the hardware to stop an ongoing scheduled scan.
+ * In this case, ieee80211_sched_scan_stopped() must not be called.
+ *
* @sw_scan_start: Notifier function that is called just before a software scan
* is started. Can be NULL, if the driver doesn't need this notification.
* The callback can sleep.
@@ -1680,10 +2546,49 @@ enum ieee80211_ampdu_mlme_action {
* AP, IBSS/WDS/mesh peer etc. This callback can sleep.
*
* @sta_remove: Notifies low level driver about removal of an associated
- * station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
+ * station, AP, IBSS/WDS/mesh peer etc. Note that after the callback
+ * returns it isn't safe to use the pointer, not even RCU protected;
+ * no RCU grace period is guaranteed between returning here and freeing
+ * the station. See @sta_pre_rcu_remove if needed.
+ * This callback can sleep.
+ *
+ * @sta_add_debugfs: Drivers can use this callback to add debugfs files
+ * when a station is added to mac80211's station list. This callback
+ * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS
+ * conditional. This callback can sleep.
+ *
+ * @sta_remove_debugfs: Remove the debugfs files which were added using
+ * @sta_add_debugfs. This callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
- * associated station, AP, IBSS/WDS/mesh peer etc. Must be atomic.
+ * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
+ * in AP mode, this callback will not be called when the flag
+ * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
+ *
+ * @sta_state: Notifies low level driver about state transition of a
+ * station (which can be the AP, a client, IBSS/WDS/mesh peer etc.)
+ * This callback is mutually exclusive with @sta_add/@sta_remove.
+ * It must not fail for down transitions but may fail for transitions
+ * up the list of states. Also note that after the callback returns it
+ * isn't safe to use the pointer, not even RCU protected - no RCU grace
+ * period is guaranteed between returning here and freeing the station.
+ * See @sta_pre_rcu_remove if needed.
+ * The callback can sleep.
+ *
+ * @sta_pre_rcu_remove: Notify driver about station removal before RCU
+ * synchronisation. This is useful if a driver needs to have station
+ * pointers protected using RCU, it can then use this call to clear
+ * the pointers instead of waiting for an RCU grace period to elapse
+ * in @sta_state.
+ * The callback can sleep.
+ *
+ * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
+ * used to transmit to the station. The changes are advertised with bits
+ * from &enum ieee80211_rate_control_changed and the values are reflected
+ * in the station data. This callback should only be used when the driver
+ * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
+ * otherwise the rate control algorithm is notified directly.
+ * Must be atomic.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
@@ -1696,7 +2601,7 @@ enum ieee80211_ampdu_mlme_action {
* The callback can sleep.
*
* @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
- * Currently, this is only used for IBSS mode debugging. Is not a
+ * Currently, this is only used for IBSS mode debugging. It is not a
* required function.
* The callback can sleep.
*
@@ -1718,6 +2623,21 @@ enum ieee80211_ampdu_mlme_action {
* ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
+ * The @buf_size parameter is only valid when the action is set to
+ * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
+ * buffer size (number of subframes) for this session -- the driver
+ * may neither send aggregates containing more subframes than this
+ * nor send aggregates in a way that lost frames would exceed the
+ * buffer size. If just limiting the aggregate size, this would be
+ * possible with a buf_size of 8:
+ * - TX: 1.....7
+ * - RX: 2....7 (lost frame #1)
+ * - TX: 8..1...
+ * which is invalid since #1 was now re-transmitted well past the
+ * buffer size of 8. Correct ways to retransmit #1 would be:
+ * - TX: 1 or 18 or 81
+ * Even "189" would be wrong since 1 could be lost again.
+ *
* Returns a negative error code on failure.
* The callback can sleep.
*
@@ -1732,31 +2652,181 @@ enum ieee80211_ampdu_mlme_action {
* in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
* accordingly. This callback is not required and may sleep.
*
- * @testmode_cmd: Implement a cfg80211 test mode command.
- * The callback can sleep.
+ * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
+ * be %NULL. The callback can sleep.
+ * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep.
*
* @flush: Flush all pending frames from the hardware queue, making sure
- * that the hardware queues are empty. If the parameter @drop is set
- * to %true, pending frames may be dropped. The callback can sleep.
+ * that the hardware queues are empty. The @queues parameter is a bitmap
+ * of queues to flush, which is useful if different virtual interfaces
+ * use different hardware queues; it may also indicate all queues.
+ * If the parameter @drop is set to %true, pending frames may be dropped.
+ * Note that vif can be NULL.
+ * The callback can sleep.
*
* @channel_switch: Drivers that need (or want) to offload the channel
* switch operation for CSAs received from the AP may implement this
* callback. They must then call ieee80211_chswitch_done() to indicate
* completion of the channel switch.
*
- * @napi_poll: Poll Rx queue for incoming data frames.
- *
* @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
* Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
* reject TX/RX mask combinations they cannot support by returning -EINVAL
* (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
*
* @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @remain_on_channel: Starts an off-channel period on the given channel, must
+ * call back to ieee80211_ready_on_channel() when on that channel. Note
+ * that normal channel traffic is not stopped as this is intended for hw
+ * offload. Frames to transmit on the off-channel channel are transmitted
+ * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
+ * duration (which will always be non-zero) expires, the driver must call
+ * ieee80211_remain_on_channel_expired().
+ * Note that this callback may be called while the device is in IDLE and
+ * must be accepted in this case.
+ * This callback may sleep.
+ * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
+ * aborted before it expires. This callback may sleep.
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
+ *
+ * @tx_frames_pending: Check if there is any pending frame in the hardware
+ * queues before entering power save.
+ *
+ * @set_bitrate_mask: Set a mask of rates to be used for rate control selection
+ * when transmitting a frame. Currently only legacy rates are handled.
+ * The callback can sleep.
+ * @rssi_callback: Notify driver when the average RSSI goes above/below
+ * thresholds that were registered previously. The callback can sleep.
+ *
+ * @release_buffered_frames: Release buffered frames according to the given
+ * parameters. In the case where the driver buffers some frames for
+ * sleeping stations mac80211 will use this callback to tell the driver
+ * to release some frames, either for PS-poll or uAPSD.
+ * Note that if the @more_data parameter is %false the driver must check
+ * if there are more frames on the given TIDs, and if there are more than
+ * the frames being released then it must still set the more-data bit in
+ * the frame. If the @more_data parameter is %true, then of course the
+ * more-data bit must always be set.
+ * The @tids parameter tells the driver which TIDs to release frames
+ * from, for PS-poll it will always have only a single bit set.
+ * In the case this is used for a PS-poll initiated release, the
+ * @num_frames parameter will always be 1 so code can be shared. In
+ * this case the driver must also set %IEEE80211_TX_STATUS_EOSP flag
+ * on the TX status (and must report TX status) so that the PS-poll
+ * period is properly ended. This is used to avoid sending multiple
+ * responses for a retried PS-poll frame.
+ * In the case this is used for uAPSD, the @num_frames parameter may be
+ * bigger than one, but the driver may send fewer frames (it must send
+ * at least one, however). In this case it is also responsible for
+ * setting the EOSP flag in the QoS header of the frames. Also, when the
+ * service period ends, the driver must set %IEEE80211_TX_STATUS_EOSP
+ * on the last frame in the SP. Alternatively, it may call the function
+ * ieee80211_sta_eosp() to inform mac80211 of the end of the SP.
+ * This callback must be atomic.
+ * @allow_buffered_frames: Prepare device to allow the given number of frames
+ * to go out to the given station. The frames will be sent by mac80211
+ * via the usual TX path after this call. The TX information for frames
+ * released will also have the %IEEE80211_TX_CTL_NO_PS_BUFFER flag set
+ * and the last one will also have %IEEE80211_TX_STATUS_EOSP set. In case
+ * frames from multiple TIDs are released and the driver might reorder
+ * them between the TIDs, it must set the %IEEE80211_TX_STATUS_EOSP flag
+ * on the last frame and clear it on all others and also handle the EOSP
+ * bit in the QoS header correctly. Alternatively, it can also call the
+ * ieee80211_sta_eosp() function.
+ * The @tids parameter is a bitmap and tells the driver which TIDs the
+ * frames will be on; it will at most have two bits set.
+ * This callback must be atomic.
+ *
+ * @get_et_sset_count: Ethtool API to get string-set count.
+ *
+ * @get_et_stats: Ethtool API to get a set of u64 stats.
+ *
+ * @get_et_strings: Ethtool API to get a set of strings to describe stats
+ * and perhaps other supported types of ethtool data-sets.
+ *
+ * @get_rssi: Get current signal strength in dBm; the function is optional
+ * and can sleep.
+ *
+ * @mgd_prepare_tx: Prepare for transmitting a management frame for association
+ * before associated. In multi-channel scenarios, a virtual interface is
+ * bound to a channel before it is associated, but as it isn't associated
+ * yet it need not necessarily be given airtime, in particular since any
+ * transmission to a P2P GO needs to be synchronized against the GO's
+ * powersave state. mac80211 will call this function before transmitting a
+ * management frame prior to having successfully associated to allow the
+ * driver to give it channel time for the transmission, to get a response
+ * and to be able to synchronize with the GO.
+ * The callback will be called before each transmission and upon return
+ * mac80211 will transmit the frame right away.
+ * The callback is optional and can (should!) sleep.
+ *
+ * @add_chanctx: Notifies device driver about new channel context creation.
+ * @remove_chanctx: Notifies device driver about channel context destruction.
+ * @change_chanctx: Notifies device driver about channel context changes that
+ * may happen when combining different virtual interfaces on the same
+ * channel context with different settings
+ * @assign_vif_chanctx: Notifies device driver about channel context being bound
+ * to vif. Possible use is for hw queue remapping.
+ * @unassign_vif_chanctx: Notifies device driver about channel context being
+ * unbound from vif.
+ * @switch_vif_chanctx: switch a number of vifs from one chanctx to
+ * another, as specified in the list of
+ * @ieee80211_vif_chanctx_switch passed to the driver, according
+ * to the mode defined in &ieee80211_chanctx_switch_mode.
+ *
+ * @start_ap: Start operation on the AP interface, this is called after all the
+ * information in bss_conf is set and beacon can be retrieved. A channel
+ * context is bound before this is called. Note that if the driver uses
+ * software scan or ROC, this (and @stop_ap) isn't called when the AP is
+ * just "paused" for scanning/ROC, which is indicated by the beacon being
+ * disabled/enabled via @bss_info_changed.
+ * @stop_ap: Stop operation on the AP interface.
+ *
+ * @restart_complete: Called after a call to ieee80211_restart_hw(), when the
+ * reconfiguration has completed. This can help the driver implement the
+ * reconfiguration step. Also called when reconfiguring because the
+ * driver's resume function returned 1, as this is just like an "inline"
+ * hardware restart. This callback may sleep.
+ *
+ * @ipv6_addr_change: IPv6 address assignment on the given interface changed.
+ * Currently, this is only called for managed or P2P client interfaces.
+ * This callback is optional; it must not sleep.
+ *
+ * @channel_switch_beacon: Starts a channel switch to a new channel.
+ * Beacons are modified to include CSA or ECSA IEs before calling this
+ * function. The corresponding count fields in these IEs must be
+ * decremented, and when they reach 1 the driver must call
+ * ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
+ * get the csa counter decremented by mac80211, but must check if it is
+ * 1 using ieee80211_csa_is_complete() after the beacon has been
+ * transmitted and then call ieee80211_csa_finish().
+ * If the CSA count starts as zero or 1, this function will not be called,
+ * since there won't be any time to beacon before the switch anyway.
+ *
+ * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
+ * information in bss_conf is set up and the beacon can be retrieved. A
+ * channel context is bound before this is called.
+ * @leave_ibss: Leave the IBSS again.
+ *
+ * @get_expected_throughput: extract the expected throughput towards the
+ * specified station. The returned value is expressed in Kbps. It returns 0
+ * if the RC algorithm does not have proper data to provide.
*/
struct ieee80211_ops {
- int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+ void (*tx)(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
+#ifdef CONFIG_PM
+ int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+ int (*resume)(struct ieee80211_hw *hw);
+ void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled);
+#endif
int (*add_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int (*change_interface)(struct ieee80211_hw *hw,
@@ -1769,6 +2839,10 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed);
+
+ int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+
u64 (*prepare_multicast)(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
void (*configure_filter)(struct ieee80211_hw *hw,
@@ -1785,8 +2859,21 @@ struct ieee80211_ops {
struct ieee80211_key_conf *conf,
struct ieee80211_sta *sta,
u32 iv32, u16 *phase1key);
+ void (*set_rekey_data)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+ void (*set_default_unicast_key)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
+ void (*cancel_hw_scan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*sched_scan_start)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+ int (*sched_scan_stop)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
void (*sw_scan_start)(struct ieee80211_hw *hw);
void (*sw_scan_complete)(struct ieee80211_hw *hw);
int (*get_stats)(struct ieee80211_hw *hw,
@@ -1799,31 +2886,133 @@ struct ieee80211_ops {
struct ieee80211_sta *sta);
int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+#ifdef CONFIG_MAC80211_DEBUGFS
+ void (*sta_add_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+ void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
- int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
+ int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
+ void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ void (*sta_rc_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed);
+ int (*conf_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params);
- u64 (*get_tsf)(struct ieee80211_hw *hw);
- void (*set_tsf)(struct ieee80211_hw *hw, u64 tsf);
- void (*reset_tsf)(struct ieee80211_hw *hw);
+ u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf);
+ void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
- int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
+ int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+ int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
#endif
- void (*flush)(struct ieee80211_hw *hw, bool drop);
+ void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
void (*channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch);
- int (*napi_poll)(struct ieee80211_hw *hw, int budget);
int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+
+ int (*remain_on_channel)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type);
+ int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
+ int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
+ void (*get_ringparam)(struct ieee80211_hw *hw,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
+ bool (*tx_frames_pending)(struct ieee80211_hw *hw);
+ int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask);
+ void (*rssi_callback)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_rssi_event rssi_event);
+
+ void (*allow_buffered_frames)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+ void (*release_buffered_frames)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+
+ int (*get_et_sset_count)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+ void (*get_et_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+ void (*get_et_strings)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+ int (*get_rssi)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, s8 *rssi_dbm);
+
+ void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+ int (*add_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+ void (*remove_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+ void (*change_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+ int (*assign_vif_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
+ void (*unassign_vif_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
+ int (*switch_vif_chanctx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+
+ void (*restart_complete)(struct ieee80211_hw *hw);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ void (*ipv6_addr_change)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+#endif
+ void (*channel_switch_beacon)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef);
+
+ int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
};
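A minimal, single-channel driver only needs to provide the handful of mandatory callbacks and can leave everything else NULL. A sketch, assuming the hypothetical mydrv_* handlers are defined elsewhere in the driver:

static const struct ieee80211_ops mydrv_ops = {
	.tx			= mydrv_tx,
	.start			= mydrv_start,
	.stop			= mydrv_stop,
	.add_interface		= mydrv_add_interface,
	.remove_interface	= mydrv_remove_interface,
	.config			= mydrv_config,
	.configure_filter	= mydrv_configure_filter,
	.conf_tx		= mydrv_conf_tx,
	.sta_state		= mydrv_sta_state,
	.ampdu_action		= mydrv_ampdu_action,
};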
/**
@@ -1837,6 +3026,8 @@ struct ieee80211_ops {
*
* @priv_data_len: length of private data
* @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
*/
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
const struct ieee80211_ops *ops);
@@ -1849,14 +3040,44 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
* need to fill the contained wiphy's information.
*
* @hw: the device to register as returned by ieee80211_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int ieee80211_register_hw(struct ieee80211_hw *hw);
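Putting the two together, a probe routine typically allocates the hardware, fills in the fields discussed above and only then registers it; a sketch, assuming a hypothetical struct mydrv_priv and the mydrv_ops table shown earlier, with dev and mac_addr provided by the bus probe code:

	struct ieee80211_hw *hw;
	struct mydrv_priv *priv;
	int err;

	hw = ieee80211_alloc_hw(sizeof(*priv), &mydrv_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;
	priv->hw = hw;

	SET_IEEE80211_DEV(hw, dev);		/* struct device of the bus device */
	SET_IEEE80211_PERM_ADDR(hw, mac_addr);	/* MAC address read from EEPROM */

	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}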
+/**
+ * struct ieee80211_tpt_blink - throughput blink description
+ * @throughput: throughput in Kbit/sec
+ * @blink_time: blink time in milliseconds
+ * (full cycle, i.e. one off + one on period)
+ */
+struct ieee80211_tpt_blink {
+ int throughput;
+ int blink_time;
+};
+
+/**
+ * enum ieee80211_tpt_led_trigger_flags - throughput trigger flags
+ * @IEEE80211_TPT_LEDTRIG_FL_RADIO: enable blinking with radio
+ * @IEEE80211_TPT_LEDTRIG_FL_WORK: enable blinking when working
+ * @IEEE80211_TPT_LEDTRIG_FL_CONNECTED: enable blinking when at least one
+ * interface is connected in some way, including being an AP
+ */
+enum ieee80211_tpt_led_trigger_flags {
+ IEEE80211_TPT_LEDTRIG_FL_RADIO = BIT(0),
+ IEEE80211_TPT_LEDTRIG_FL_WORK = BIT(1),
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED = BIT(2),
+};
+
#ifdef CONFIG_MAC80211_LEDS
-extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
#endif
/**
* ieee80211_get_tx_led_name - get name of TX LED
@@ -1867,6 +3088,8 @@ extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
* of the trigger so you can automatically link the LED device.
*
* @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
@@ -1886,6 +3109,8 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
* of the trigger so you can automatically link the LED device.
*
* @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
@@ -1905,6 +3130,8 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
* of the trigger so you can automatically link the LED device.
*
* @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
@@ -1924,6 +3151,8 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
* of the trigger so you can automatically link the LED device.
*
* @hw: the hardware to get the LED trigger name for
+ *
+ * Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
@@ -1935,6 +3164,31 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
}
/**
+ * ieee80211_create_tpt_led_trigger - create throughput LED trigger
+ * @hw: the hardware to create the trigger for
+ * @flags: trigger flags, see &enum ieee80211_tpt_led_trigger_flags
+ * @blink_table: the blink table -- needs to be ordered by throughput
+ * @blink_table_len: size of the blink table
+ *
+ * Return: %NULL (in case of error, or if no LED triggers are
+ * configured) or the name of the new trigger.
+ *
+ * Note: This function must be called before ieee80211_register_hw().
+ */
+static inline char *
+ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ return __ieee80211_create_tpt_led_trigger(hw, flags, blink_table,
+ blink_table_len);
+#else
+ return NULL;
+#endif
+}
+
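
A usage sketch (not part of the diff itself; the mydrv_* names and blink values are hypothetical): a driver builds a blink table ordered by throughput and creates the trigger before registering the hardware.

#include <net/mac80211.h>

/* Blink table must be ordered by throughput (values are arbitrary examples). */
static const struct ieee80211_tpt_blink mydrv_blink_table[] = {
	{ .throughput = 0,          .blink_time = 334 },
	{ .throughput = 1 * 1024,   .blink_time = 260 },
	{ .throughput = 10 * 1024,  .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 50 },
};

static int mydrv_register(struct ieee80211_hw *hw)
{
	char *tpt_trig;

	/* Must happen before ieee80211_register_hw(). */
	tpt_trig = ieee80211_create_tpt_led_trigger(hw,
				IEEE80211_TPT_LEDTRIG_FL_RADIO,
				mydrv_blink_table,
				ARRAY_SIZE(mydrv_blink_table));
	if (tpt_trig)
		pr_debug("using throughput LED trigger %s\n", tpt_trig);
	/* NULL only means no LED triggers are configured; not fatal. */

	return ieee80211_register_hw(hw);
}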
+/**
* ieee80211_unregister_hw - Unregister a hardware device
*
* This function instructs mac80211 to free allocated resources
@@ -1969,21 +3223,21 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
*/
void ieee80211_restart_hw(struct ieee80211_hw *hw);
-/** ieee80211_napi_schedule - schedule NAPI poll
- *
- * Use this function to schedule NAPI polling on a device.
- *
- * @hw: the hardware to start polling
- */
-void ieee80211_napi_schedule(struct ieee80211_hw *hw);
-
-/** ieee80211_napi_complete - complete NAPI polling
- *
- * Use this function to finish NAPI polling on a device.
- *
- * @hw: the hardware to stop polling
+/**
+ * ieee80211_napi_add - initialize mac80211 NAPI context
+ * @hw: the hardware to initialize the NAPI context on
+ * @napi: the NAPI context to initialize
+ * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
+ * driver doesn't use NAPI
+ * @poll: poll function
+ * @weight: default weight
+ *
+ * See also netif_napi_add().
*/
-void ieee80211_napi_complete(struct ieee80211_hw *hw);
+void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
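
A minimal sketch of how a driver might wire this up; the mydrv_* names, the embedded dummy netdevice and the weight of 64 are assumptions, not taken from the header.

#include <linux/netdevice.h>
#include <net/mac80211.h>

struct mydrv_priv {
	struct ieee80211_hw *hw;
	struct napi_struct napi;
	struct net_device napi_dev;	/* dummy netdev, used only for NAPI */
};

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... receive up to @budget frames here, handing each one to
	 * mac80211 with ieee80211_rx() (softirq context) ...
	 */

	if (done < budget)
		napi_complete(napi);
	return done;
}

static void mydrv_setup_napi(struct mydrv_priv *priv)
{
	ieee80211_napi_add(priv->hw, &priv->napi, &priv->napi_dev,
			   mydrv_poll, 64);
}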
/**
* ieee80211_rx - receive frame
@@ -1997,7 +3251,8 @@ void ieee80211_napi_complete(struct ieee80211_hw *hw);
* This function may not be called in IRQ context. Calls to this function
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
- * mixed for a single hardware.
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
*
* In process context use instead ieee80211_rx_ni().
*
@@ -2013,7 +3268,8 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
* (internally defers to a tasklet.)
*
* Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
- * be mixed for a single hardware.
+ * be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
@@ -2027,7 +3283,8 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
* (internally disables bottom halves).
*
* Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
- * not be mixed for a single hardware.
+ * not be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
@@ -2040,11 +3297,105 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
local_bh_enable();
}
+/**
+ * ieee80211_sta_ps_transition - PS transition for connected sta
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
+ * flag set, use this function to inform mac80211 about a connected station
+ * entering/leaving PS mode.
+ *
+ * This function may not be called in IRQ context or with softirqs enabled.
+ *
+ * Calls to this function for a single hardware must be synchronized against
+ * each other.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ *
+ * Return: 0 on success. -EINVAL when the requested PS mode is already set.
+ */
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);
+
+/**
+ * ieee80211_sta_ps_transition_ni - PS transition for connected sta
+ * (in process context)
+ *
+ * Like ieee80211_sta_ps_transition() but can be called in process context
+ * (internally disables bottom halves). Concurrent call restriction still
+ * applies.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ *
+ * Return: Like ieee80211_sta_ps_transition().
+ */
+static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
+ bool start)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ieee80211_sta_ps_transition(sta, start);
+ local_bh_enable();
+
+ return ret;
+}
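
For illustration only (assuming the driver advertises %IEEE80211_HW_AP_LINK_PS and parses the PM bit itself), the RX path could look roughly like this:

#include <net/mac80211.h>

/* Called from the RX path for data frames received from a connected
 * station; @pm_bit_set reflects the frame's Power Management bit.
 */
static void mydrv_rx_check_ps(struct ieee80211_sta *sta, bool pm_bit_set)
{
	/* -EINVAL only means the station is already in that state. */
	ieee80211_sta_ps_transition_ni(sta, pm_bit_set);
}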
+
/*
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
*/
-#define IEEE80211_TX_STATUS_HEADROOM 13
+#define IEEE80211_TX_STATUS_HEADROOM 14
+
+/**
+ * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
+ * @sta: &struct ieee80211_sta pointer for the sleeping station
+ * @tid: the TID that has buffered frames
+ * @buffered: indicates whether or not frames are buffered for this TID
+ *
+ * If a driver buffers frames for a powersave station instead of passing
+ * them back to mac80211 for retransmission, the station may still need
+ * to be told that there are buffered frames via the TIM bit.
+ *
+ * This function informs mac80211 whether or not there are frames that are
+ * buffered in the driver for a given TID; mac80211 can then use this data
+ * to set the TIM bit (NOTE: This may call back into the driver's set_tim
+ * call! Beware of the locking!)
+ *
+ * If all frames are released to the station (due to PS-poll or uAPSD)
+ * then the driver needs to inform mac80211 that there no longer are
+ * frames buffered. However, when the station wakes up mac80211 assumes
+ * that all buffered frames will be transmitted and clears this data, so
+ * drivers need to make sure they inform mac80211 about all buffered
+ * frames on the sleep transition (sta_notify() with %STA_NOTIFY_SLEEP).
+ *
+ * Note that technically mac80211 only needs to know this per AC, not per
+ * TID, but since driver buffering will inevitably happen per TID (since
+ * it is related to aggregation) it is easier to make mac80211 map the
+ * TID to the AC as required instead of keeping track in all drivers that
+ * use this API.
+ */
+void ieee80211_sta_set_buffered(struct ieee80211_sta *sta,
+ u8 tid, bool buffered);
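
A small illustrative sketch (hypothetical driver helper) of how the buffered state might be reported whenever the driver's per-TID queue depth changes:

#include <net/mac80211.h>

/* Called whenever the driver's internal per-TID queue for a sleeping
 * station grows or drains.
 */
static void mydrv_update_buffered(struct ieee80211_sta *sta, u8 tid,
				  unsigned int queued_frames)
{
	ieee80211_sta_set_buffered(sta, tid, queued_frames > 0);
}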
+
+/**
+ * ieee80211_get_tx_rates - get the selected transmit rates for a packet
+ *
+ * Call this function in a driver with per-packet rate selection support
+ * to combine the rate info in the packet tx info with the most recent
+ * rate selection table for the station entry.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @sta: the receiver station to which this packet is sent.
+ * @skb: the frame to be transmitted.
+ * @dest: buffer for extracted rate/retry information
+ * @max_rates: maximum number of rates to fetch
+ */
+void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct ieee80211_tx_rate *dest,
+ int max_rates);
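
A rough sketch of a TX path using this; the 4-entry table size and mydrv naming are assumptions.

#include <net/mac80211.h>

static void mydrv_fill_tx_rates(struct ieee80211_vif *vif,
				struct ieee80211_sta *sta,
				struct sk_buff *skb)
{
	struct ieee80211_tx_rate rates[4];	/* example table size */
	int i;

	ieee80211_get_tx_rates(vif, sta, skb, rates, ARRAY_SIZE(rates));

	for (i = 0; i < ARRAY_SIZE(rates); i++) {
		if (rates[i].idx < 0)
			break;
		/* ... program rates[i].idx / rates[i].count into the
		 * device's rate-retry table for this frame ...
		 */
	}
}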
/**
* ieee80211_tx_status - transmit status callback
@@ -2055,8 +3406,9 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
*
* This function may not be called in IRQ context. Calls to this function
* for a single hardware must be synchronized against each other. Calls
- * to this function and ieee80211_tx_status_irqsafe() may not be mixed
- * for a single hardware.
+ * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
+ * may not be mixed for a single hardware. Must not run concurrently with
+ * ieee80211_rx() or ieee80211_rx_ni().
*
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
@@ -2065,13 +3417,33 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_ni - transmit status callback (in process context)
+ *
+ * Like ieee80211_tx_status() but can be called in process context.
+ *
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_irqsafe() may not be mixed
+ * for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ local_bh_disable();
+ ieee80211_tx_status(hw, skb);
+ local_bh_enable();
+}
+
+/**
* ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
*
* Like ieee80211_tx_status() but can be called in IRQ context
* (internally defers to a tasklet.)
*
- * Calls to this function and ieee80211_tx_status() may not be mixed for a
- * single hardware.
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_ni() may not be mixed for a single hardware.
*
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
@@ -2080,6 +3452,58 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_report_low_ack - report non-responding station
+ *
+ * When operating in AP-mode, call this function to report a non-responding
+ * connected STA.
+ *
+ * @sta: the non-responding connected sta
+ * @num_packets: number of packets sent to @sta without a response
+ */
+void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
+
+#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
+
+/**
+ * struct ieee80211_mutable_offsets - mutable beacon offsets
+ * @tim_offset: position of TIM element
+ * @tim_length: size of TIM element
+ * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
+ * to CSA counters. This array can contain zero values which
+ * should be ignored.
+ */
+struct ieee80211_mutable_offsets {
+ u16 tim_offset;
+ u16 tim_length;
+
+ u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
+};
+
+/**
+ * ieee80211_beacon_get_template - beacon template generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @offs: &struct ieee80211_mutable_offsets pointer to struct that will
+ * receive the offsets that may be updated by the driver.
+ *
+ * If the driver implements beaconing modes, it must use this function to
+ * obtain the beacon template.
+ *
+ * This function should be used if the beacon frames are generated by the
+ * device, and then the driver must use the returned beacon as the template.
+ * The driver or the device is responsible for updating the DTIM and, when
+ * applicable, the CSA count.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: The beacon template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_beacon_get_template(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_mutable_offsets *offs);
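
A possible driver flow, sketched with a hypothetical mydrv_upload_beacon() helper:

#include <net/mac80211.h>

static int mydrv_update_beacon(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *beacon;
	int ret;

	beacon = ieee80211_beacon_get_template(hw, vif, &offs);
	if (!beacon)
		return -ENOMEM;

	/* mydrv_upload_beacon() is a hypothetical helper that pushes the
	 * template and the TIM offset/length to the device/firmware.
	 */
	ret = mydrv_upload_beacon(hw, beacon->data, beacon->len,
				  offs.tim_offset, offs.tim_length);

	dev_kfree_skb(beacon);	/* the driver owns the returned skb */
	return ret;
}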
+
+/**
* ieee80211_beacon_get_tim - beacon generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -2090,18 +3514,16 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
* Set to 0 if invalid (in non-AP modes).
*
* If the driver implements beaconing modes, it must use this function to
- * obtain the beacon frame/template.
+ * obtain the beacon frame.
*
* If the beacon frames are generated by the host system (i.e., not in
* hardware/firmware), the driver uses this function to get each beacon
- * frame from mac80211 -- it is responsible for calling this function
- * before the beacon is needed (e.g. based on hardware interrupt).
- *
- * If the beacon frames are generated by the device, then the driver
- * must use the returned beacon as the template and change the TIM IE
- * according to the current DTIM parameters/TIM bitmap.
+ * frame from mac80211 -- it is responsible for calling this function exactly
+ * once before the beacon is needed (e.g. based on hardware interrupt).
*
* The driver is responsible for freeing the returned skb.
+ *
+ * Return: The beacon template. %NULL on error.
*/
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2113,6 +3535,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* See ieee80211_beacon_get_tim().
+ *
+ * Return: See ieee80211_beacon_get_tim().
*/
static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
@@ -2121,6 +3545,54 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
+ * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The csa counter should be updated after each beacon transmission.
+ * This function is called implicitly when
+ * ieee80211_beacon_get/ieee80211_beacon_get_tim are called; however, if the
+ * beacon frames are generated by the device, the driver should call this
+ * function after each beacon transmission to sync mac80211's csa counters.
+ *
+ * Return: new csa counter value
+ */
+u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_csa_finish - notify mac80211 about channel switch
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a channel switch announcement was scheduled and the counter in this
+ * announcement hits 1, this function must be called by the driver to
+ * notify mac80211 that the channel can be changed.
+ */
+void ieee80211_csa_finish(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_csa_is_complete - find out if counters reached 1
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function returns whether the channel switch counters reached 1.
+ */
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
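
Taken together, a device-generated-beacon driver might keep the counters in sync roughly like this (the beacon-done hook is hypothetical):

#include <net/mac80211.h>

/* Hypothetical hook invoked by the driver after every beacon the device
 * transmitted while a channel switch announcement is active.
 */
static void mydrv_beacon_tx_done(struct ieee80211_vif *vif)
{
	ieee80211_csa_update_counter(vif);

	if (ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);
}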
+
+
+/**
+ * ieee80211_proberesp_get - retrieve a Probe Response template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Probe Response template which can, for example, be uploaded to
+ * hardware. The destination address should be set by the caller.
+ *
+ * Can only be called in AP mode.
+ *
+ * Return: The Probe Response template. %NULL on error.
+ */
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
* ieee80211_pspoll_get - retrieve a PS Poll template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -2131,6 +3603,8 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
*
* Note: Caller (or hardware) is responsible for setting the
* &IEEE80211_FCTL_PM bit.
+ *
+ * Return: The PS Poll template. %NULL on error.
*/
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -2146,6 +3620,8 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
*
* Note: Caller (or hardware) is responsible for setting the
* &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
+ *
+ * Return: The nullfunc template. %NULL on error.
*/
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -2156,16 +3632,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @ssid: SSID buffer
* @ssid_len: length of SSID
- * @ie: buffer containing all IEs except SSID for the template
- * @ie_len: length of the IE buffer
+ * @tailroom: tailroom to reserve at end of SKB for IEs
*
* Creates a Probe Request template which can, for example, be uploaded to
* hardware.
+ *
+ * Return: The Probe Request template. %NULL on error.
*/
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len);
+ size_t tailroom);
/**
* ieee80211_rts_get - RTS frame generation function
@@ -2196,6 +3673,8 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* If the RTS is generated in firmware, but the host system must provide
* the duration field, the low-level driver uses this function to receive
* the duration field value in little-endian byteorder.
+ *
+ * Return: The duration.
*/
__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, size_t frame_len,
@@ -2231,6 +3710,8 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
* If the CTS-to-self is generated in firmware, but the host system must provide
* the duration field, the low-level driver uses this function to receive
* the duration field value in little-endian byteorder.
+ *
+ * Return: The duration.
*/
__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2241,14 +3722,18 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
* ieee80211_generic_frame_duration - Calculate the duration field for a frame
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @band: the band to calculate the frame duration on
* @frame_len: the length of the frame.
* @rate: the rate at which the frame is going to be transmitted.
*
* Calculate the duration field of some generic frame, given its
* length and transmission rate (in 100kbps).
+ *
+ * Return: The duration.
*/
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ enum ieee80211_band band,
size_t frame_len,
struct ieee80211_rate *rate);
@@ -2261,9 +3746,10 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
* hardware/firmware does not implement buffering of broadcast/multicast
* frames when power saving is used, 802.11 code buffers them in the host
* memory. The low-level driver uses this function to fetch next buffered
- * frame. In most cases, this is used when generating beacon frame. This
- * function returns a pointer to the next buffered skb or NULL if no more
- * buffered frames are available.
+ * frame. In most cases, this is used when generating a beacon frame.
+ *
+ * Return: A pointer to the next buffered skb or NULL if no more buffered
+ * frames are available.
*
* Note: buffered frames are returned only after DTIM beacon frame was
* generated with ieee80211_beacon_get() and the low-level driver must thus
@@ -2276,21 +3762,232 @@ struct sk_buff *
ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
/**
- * ieee80211_get_tkip_key - get a TKIP rc4 for skb
+ * ieee80211_get_tkip_p1k_iv - get a TKIP phase 1 key for IV32
+ *
+ * This function returns the TKIP phase 1 key for the given IV32.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @iv32: IV32 to get the P1K for
+ * @p1k: a buffer to which the key will be written, as 5 u16 values
+ */
+void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
+ u32 iv32, u16 *p1k);
+
+/**
+ * ieee80211_get_tkip_p1k - get a TKIP phase 1 key
*
- * This function computes a TKIP rc4 key for an skb. It computes
- * a phase 1 key if needed (iv16 wraps around). This function is to
- * be used by drivers which can do HW encryption but need to compute
- * to phase 1/2 key in SW.
+ * This function returns the TKIP phase 1 key for the IV32 taken
+ * from the given packet.
*
* @keyconf: the parameter passed with the set key
- * @skb: the skb for which the key is needed
- * @type: TBD
- * @key: a buffer to which the key will be written
+ * @skb: the packet to take the IV32 value from that will be encrypted
+ * with this P1K
+ * @p1k: a buffer to which the key will be written, as 5 u16 values
+ */
+static inline void ieee80211_get_tkip_p1k(struct ieee80211_key_conf *keyconf,
+ struct sk_buff *skb, u16 *p1k)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+ u32 iv32 = get_unaligned_le32(&data[4]);
+
+ ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k);
+}
+
+/**
+ * ieee80211_get_tkip_rx_p1k - get a TKIP phase 1 key for RX
+ *
+ * This function returns the TKIP phase 1 key for the given IV32
+ * and transmitter address.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @ta: TA that will be used with the key
+ * @iv32: IV32 to get the P1K for
+ * @p1k: a buffer to which the key will be written, as 5 u16 values
+ */
+void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
+ const u8 *ta, u32 iv32, u16 *p1k);
+
+/**
+ * ieee80211_get_tkip_p2k - get a TKIP phase 2 key
+ *
+ * This function computes the TKIP RC4 key for the IV values
+ * in the packet.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @skb: the packet to take the IV32/IV16 values from that will be
+ * encrypted with this key
+ * @p2k: a buffer to which the key will be written, 16 bytes
+ */
+void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
+ struct sk_buff *skb, u8 *p2k);
+
+/**
+ * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
+ *
+ * This function computes the two AES-CMAC sub-keys, based on the
+ * previously installed master key.
+ *
+ * @keyconf: the parameter passed with the set key
+ * @k1: a buffer to be filled with the 1st sub-key
+ * @k2: a buffer to be filled with the 2nd sub-key
+ */
+void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
+ u8 *k1, u8 *k2);
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ */
+struct ieee80211_key_seq {
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[6];
+ } ccmp;
+ struct {
+ u8 pn[6];
+ } aes_cmac;
+ };
+};
+
+/**
+ * ieee80211_get_key_tx_seq - get key TX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @seq: buffer to receive the sequence data
+ *
+ * This function allows a driver to retrieve the current TX IV/PN
+ * for the given key. It must not be called if IV generation is
+ * offloaded to the device.
+ *
+ * Note that this function may only be called when no TX processing
+ * can be done concurrently, for example when queues are stopped
+ * and the stop has been synchronized.
+ */
+void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_get_key_rx_seq - get key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * the value on TID 0 is also used for non-QoS frames. For
+ * CMAC, only TID 0 is valid.
+ * @seq: buffer to receive the sequence data
+ *
+ * This function allows a driver to retrieve the current RX IV/PNs
+ * for the given key. It must not be called if IV checking is done
+ * by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_set_key_tx_seq - set key TX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current TX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and the
+ * device may have transmitted frames using the PTK, e.g. replies to
+ * ARP requests.
+ *
+ * Note that this function may only be called when no TX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_set_key_rx_seq - set key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * the value on TID 0 is also used for non-QoS frames. For
+ * CMAC, only TID 0 is valid.
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current RX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and GTK
+ * rekey may have been done while suspended. It should not be called
+ * if IV checking is done by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_remove_key - remove the given key
+ * @keyconf: the parameter passed with the set key
+ *
+ * Remove the given key. If the key was uploaded to the hardware at the
+ * time this function is called, it is not deleted in the hardware but
+ * instead assumed to have been removed already.
+ *
+ * Note that due to locking considerations this function can (currently)
+ * only be called during key iteration (ieee80211_iter_keys().)
+ */
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
+ * @vif: the virtual interface to add the key on
+ * @keyconf: new key data
+ *
+ * When GTK rekeying was done while the system was suspended, (a) new
+ * key(s) will be available. These will be needed by mac80211 for proper
+ * RX processing, so this function allows setting them.
+ *
+ * The function returns the newly allocated key structure, which will
+ * have similar contents to the passed key configuration but point to
+ * mac80211-owned memory. In case of errors, the function returns an
+ * ERR_PTR(), use IS_ERR() etc.
+ *
+ * Note that this function assumes the key isn't added to hardware
+ * acceleration, so no TX will be done with the key. Since it's a GTK
+ * on managed (station) networks, this is true anyway. If the driver
+ * calls this function from the resume callback and subsequently uses
+ * the return code 1 to reconfigure the device, this key will be part
+ * of the reconfiguration.
+ *
+ * Note that the driver should also call ieee80211_set_key_rx_seq()
+ * for the new key for each TID to set up sequence counters properly.
+ *
+ * IMPORTANT: If this replaces a key that is present in the hardware,
+ * then it will attempt to remove it during this call. In many cases
+ * this isn't what you want, so call ieee80211_remove_key() first for
+ * the key that's being replaced.
*/
-void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
- struct sk_buff *skb,
- enum ieee80211_tkip_key_type type, u8 *key);
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
+ * @vif: virtual interface the rekeying was done on
+ * @bssid: The BSSID of the AP, for checking association
+ * @replay_ctr: the new replay counter after GTK rekeying
+ * @gfp: allocation flags
+ */
+void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
+ const u8 *replay_ctr, gfp_t gfp);
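
A sketch of the resume-time rekey flow described above; the firmware-provided buffers (rx_pn, bssid, replay_ctr) are assumptions about a hypothetical device.

#include <linux/err.h>
#include <linux/string.h>
#include <net/mac80211.h>

static void mydrv_resume_gtk_rekey(struct ieee80211_vif *vif,
				   struct ieee80211_key_conf *fw_keyconf,
				   const u8 rx_pn[][6],
				   const u8 *bssid, const u8 *replay_ctr)
{
	struct ieee80211_key_conf *key;
	struct ieee80211_key_seq seq = {};
	int tid;

	key = ieee80211_gtk_rekey_add(vif, fw_keyconf);
	if (IS_ERR(key))
		return;

	/* set up the RX PN of the new key for each data TID */
	for (tid = 0; tid < 16; tid++) {
		memcpy(seq.ccmp.pn, rx_pn[tid], sizeof(seq.ccmp.pn));
		ieee80211_set_key_rx_seq(key, tid, &seq);
	}

	ieee80211_gtk_rekey_notify(vif, bssid, replay_ctr, GFP_KERNEL);
}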
+
/**
* ieee80211_wake_queue - wake specific queue
* @hw: pointer as obtained from ieee80211_alloc_hw().
@@ -2315,6 +4012,8 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
* @queue: queue number (counted from zero).
*
* Drivers should use this function instead of netif_stop_queue.
+ *
+ * Return: %true if the queue is stopped. %false otherwise.
*/
int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
@@ -2349,6 +4048,43 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted);
/**
+ * ieee80211_sched_scan_results - got results from scheduled scan
+ *
+ * When a scheduled scan is running, this function needs to be called by the
+ * driver whenever there are new scan results available.
+ *
+ * @hw: the hardware that is performing scheduled scans
+ */
+void ieee80211_sched_scan_results(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_sched_scan_stopped - inform that the scheduled scan has stopped
+ *
+ * When a scheduled scan is running, this function can be called by
+ * the driver if it needs to stop the scan to perform another task.
+ * Usual scenarios are drivers that cannot continue the scheduled scan
+ * while associating, for instance.
+ *
+ * @hw: the hardware that is performing scheduled scans
+ */
+void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
+
+/**
+ * enum ieee80211_interface_iteration_flags - interface iteration flags
+ * @IEEE80211_IFACE_ITER_NORMAL: Iterate over all interfaces that have
+ * been added to the driver; However, note that during hardware
+ * reconfiguration (after restart_hw) it will iterate over a new
+ * interface and over all the existing interfaces even if they
+ * haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
+ * interfaces, even if they haven't been re-added to the driver yet.
+ */
+enum ieee80211_interface_iteration_flags {
+ IEEE80211_IFACE_ITER_NORMAL = 0,
+ IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
+};
+
+/**
* ieee80211_iterate_active_interfaces - iterate active interfaces
*
* This function iterates over the interfaces associated with a given
@@ -2356,13 +4092,15 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted);
* This function allows the iterator function to sleep, when the iterator
* function is atomic @ieee80211_iterate_active_interfaces_atomic can
* be used.
- * Does not iterate over a new interface during add_interface()
+ * Does not iterate over a new interface during add_interface().
*
* @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
* @iterator: the iterator function to call
* @data: first argument of the iterator function
*/
void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
+ u32 iter_flags,
void (*iterator)(void *data, u8 *mac,
struct ieee80211_vif *vif),
void *data);
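
An illustrative iterator (counting AP interfaces is just an example):

#include <net/mac80211.h>

static void mydrv_count_ap_iter(void *data, u8 *mac,
				struct ieee80211_vif *vif)
{
	int *n_ap = data;

	if (vif->type == NL80211_IFTYPE_AP)
		(*n_ap)++;
}

static int mydrv_count_ap_interfaces(struct ieee80211_hw *hw)
{
	int n_ap = 0;

	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_NORMAL,
					    mydrv_count_ap_iter, &n_ap);
	return n_ap;
}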
@@ -2374,19 +4112,40 @@ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
* hardware that are currently active and calls the callback for them.
* This function requires the iterator callback function to be atomic,
* if that is not desired, use @ieee80211_iterate_active_interfaces instead.
- * Does not iterate over a new interface during add_interface()
+ * Does not iterate over a new interface during add_interface().
*
* @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
* @iterator: the iterator function to call, cannot sleep
* @data: first argument of the iterator function
*/
void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
+ u32 iter_flags,
void (*iterator)(void *data,
u8 *mac,
struct ieee80211_vif *vif),
void *data);
/**
+ * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This version can only be used while holding the RTNL.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data,
+ u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
* Drivers and mac80211 use this to add work onto the mac80211 workqueue.
@@ -2415,6 +4174,7 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
* ieee80211_start_tx_ba_session - Start a tx Block Ack session.
* @sta: the station for which to start a BA session
* @tid: the TID to BA on.
+ * @timeout: session timeout value (in TUs)
*
* Return: success if addBA request was sent, failure otherwise
*
@@ -2422,7 +4182,8 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
* the need to start aggregation on a certain RA/TID, the session level
* will be managed by the mac80211.
*/
-int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
+int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
+ u16 timeout);
/**
* ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
@@ -2469,7 +4230,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
* @vif: virtual interface to look for station on
* @addr: station's address
*
- * This function must be called under RCU lock and the
+ * Return: The station, if found. %NULL otherwise.
+ *
+ * Note: This function must be called under RCU lock and the
* resulting pointer is only valid under RCU lock as well.
*/
struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
@@ -2482,7 +4245,9 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
* @addr: remote station's address
* @localaddr: local address (vif->sdata->vif.addr). Use NULL for 'any'.
*
- * This function must be called under RCU lock and the
+ * Return: The station, if found. %NULL otherwise.
+ *
+ * Note: This function must be called under RCU lock and the
* resulting pointer is only valid under RCU lock as well.
*
* NOTE: You may pass NULL for localaddr, but then you will just get
@@ -2532,6 +4297,80 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta, bool block);
/**
+ * ieee80211_sta_eosp - notify mac80211 about end of SP
+ * @pubsta: the station
+ *
+ * When a device transmits frames in a way that it can't tell
+ * mac80211 in the TX status about the EOSP, it must clear the
+ * %IEEE80211_TX_STATUS_EOSP bit and call this function instead.
+ * This applies for PS-Poll as well as uAPSD.
+ *
+ * Note that just like with _tx_status() and _rx() drivers must
+ * not mix calls to irqsafe/non-irqsafe versions, this function
+ * must not be mixed with those either. Use all the irqsafe versions,
+ * or all the non-irqsafe versions; don't mix!
+ *
+ * NB: the _irqsafe version of this function doesn't exist, no
+ * driver needs it right now. Don't call this function if
+ * you'd need the _irqsafe version, look at the git history
+ * and restore the _irqsafe version!
+ */
+void ieee80211_sta_eosp(struct ieee80211_sta *pubsta);
+
+/**
+ * ieee80211_iter_keys - iterate keys programmed into the device
+ * @hw: pointer obtained from ieee80211_alloc_hw()
+ * @vif: virtual interface to iterate, may be %NULL for all
+ * @iter: iterator function that will be called for each key
+ * @iter_data: custom data to pass to the iterator function
+ *
+ * This function can be used to iterate all the keys known to
+ * mac80211, even those that weren't previously programmed into
+ * the device. This is intended for use in WoWLAN if the device
+ * needs reprogramming of the keys during suspend. Note that due
+ * to locking reasons, it is also only safe to call this at few
+ * spots since it must hold the RTNL and be able to sleep.
+ *
+ * The order in which the keys are iterated matches the order
+ * in which they were originally installed and handed to the
+ * set_key callback.
+ */
+void ieee80211_iter_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data);
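
A sketch of the WoWLAN suspend use case mentioned above; mydrv_program_key() is a hypothetical driver helper.

#include <net/mac80211.h>

static void mydrv_program_key_iter(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *data)
{
	/* mydrv_program_key() would write the key material into the
	 * device's key table.
	 */
	mydrv_program_key(hw, vif, sta, key);
}

static void mydrv_wowlan_program_keys(struct ieee80211_hw *hw)
{
	/* caller must hold the RTNL and be able to sleep (see above) */
	ieee80211_iter_keys(hw, NULL, mydrv_program_key_iter, NULL);
}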
+
+/**
+ * ieee80211_iter_chan_contexts_atomic - iterate channel contexts
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @iter: iterator function
+ * @iter_data: data passed to iterator function
+ *
+ * Iterate all active channel contexts. This function is atomic and
+ * doesn't acquire any locks internally that might be held in other
+ * places while calling into the driver.
+ *
+ * The iterator will not find a context that's being added (during
+ * the driver callback to add it) but will find it while it's being
+ * removed.
+ *
+ * Note that during hardware restart, all contexts that existed
+ * before the restart are considered already present so will be
+ * found while iterating, whether they've been re-added already
+ * or not.
+ */
+void ieee80211_iter_chan_contexts_atomic(
+ struct ieee80211_hw *hw,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data),
+ void *iter_data);
+
+/**
* ieee80211_ap_probereq_get - retrieve a Probe Request template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -2541,7 +4380,9 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
* information. This function must only be called from within the
* .bss_info_changed callback function and only in managed mode. The function
* is only useful when the interface is associated, otherwise it will return
- * NULL.
+ * %NULL.
+ *
+ * Return: The Probe Request template. %NULL on error.
*/
struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -2551,7 +4392,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER and
+ * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER and
* %IEEE80211_CONF_PS is set, the driver needs to inform whenever the
* hardware is not receiving beacons with this function.
*/
@@ -2562,9 +4403,11 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER, and
+ * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER, and
* %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
* needs to inform if the connection to the AP has been lost.
+ * The function may also be called if the connection needs to be terminated
+ * for some other reason, even if %IEEE80211_HW_CONNECTION_MONITOR isn't set.
*
* This function will cause immediate change to disassociated state,
* without connection recovery attempts.
@@ -2572,34 +4415,27 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
void ieee80211_connection_loss(struct ieee80211_vif *vif);
/**
- * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm
+ * ieee80211_resume_disconnect - disconnect from AP after resume
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * Some hardware require full power save to manage simultaneous BT traffic
- * on the WLAN frequency. Full PSM is required periodically, whenever there are
- * burst of BT traffic. The hardware gets information of BT traffic via
- * hardware co-existence lines, and consequentially requests mac80211 to
- * (temporarily) enter full psm.
- * This function will only temporarily disable dynamic PS, not enable PSM if
- * it was not already enabled.
- * The driver must make sure to re-enable dynamic PS using
- * ieee80211_enable_dyn_ps() if the driver has disabled it.
- *
+ * Instructs mac80211 to disconnect from the AP after resume.
+ * Drivers can use this after WoWLAN if they know that the
+ * connection cannot be kept up, for example because keys were
+ * used while the device was asleep but the replay counters or
+ * similar cannot be retrieved from the device during resume.
+ *
+ * Note that due to implementation issues, if the driver uses
+ * the reconfiguration functionality during resume the interface
+ * will still be added as associated first during resume and then
+ * disconnect normally later.
+ *
+ * This function can only be called from the resume callback and
+ * the driver must not be holding any of its own locks while it
+ * calls this function, or at least not any locks it needs in the
+ * key configuration paths (if it supports HW crypto).
*/
-void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif);
-
-/**
- * ieee80211_enable_dyn_ps - restore dynamic psm after being disabled
- *
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
- *
- * This function restores dynamic PS after being temporarily disabled via
- * ieee80211_disable_dyn_ps(). Each ieee80211_disable_dyn_ps() call must
- * be coupled with an eventual call to this function.
- *
- */
-void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif);
+void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
/**
* ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
@@ -2609,7 +4445,7 @@ void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif);
* @rssi_event: the RSSI trigger event type
* @gfp: context flags
*
- * When the %IEEE80211_HW_SUPPORTS_CQM_RSSI is set, and a connection quality
+ * When the %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set, and a connection quality
* monitoring is configured with an rssi threshold, the driver will inform
* whenever the rssi level reaches the threshold.
*/
@@ -2618,6 +4454,13 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
gfp_t gfp);
/**
+ * ieee80211_radar_detected - inform that a radar was detected
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_radar_detected(struct ieee80211_hw *hw);
+
+/**
* ieee80211_chswitch_done - Complete channel switch process
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @success: make the channel switch successful or not
@@ -2640,32 +4483,48 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_mode);
/**
- * ieee80211_key_removed - disable hw acceleration for key
- * @key_conf: The key hw acceleration should be disabled for
+ * ieee80211_ready_on_channel - notification of remain-on-channel start
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_ready_on_channel(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_remain_on_channel_expired - remain_on_channel duration expired
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ */
+void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw);
+
+/**
+ * ieee80211_stop_rx_ba_session - callback to stop existing BA sessions
*
- * This allows drivers to indicate that the given key has been
- * removed from hardware acceleration, due to a new key that
- * was added. Don't use this if the key can continue to be used
- * for TX, if the key restriction is on RX only it is permitted
- * to keep the key for TX only and not call this function.
+ * In order not to harm system performance and user experience, the device
+ * may request not to allow any RX BA sessions and to tear down existing RX BA
+ * sessions based on system constraints such as periodic BT activity that needs
+ * to limit WLAN activity (e.g. SCO or A2DP).
+ * In such cases, the intention is to limit the duration of the RX PPDU and
+ * therefore prevent the peer device from using A-MPDU aggregation.
*
- * Due to locking constraints, it may only be called during
- * @set_key. This function must be allowed to sleep, and the
- * key it tries to disable may still be used until it returns.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ba_rx_bitmap: Bit map of open rx ba per tid
+ * @addr: the peer's MAC address (the BSSID)
*/
-void ieee80211_key_removed(struct ieee80211_key_conf *key_conf);
-
-/* Rate control API */
+void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
+ const u8 *addr);
/**
- * enum rate_control_changed - flags to indicate which parameter changed
+ * ieee80211_send_bar - send a BlockAckReq frame
+ *
+ * Can be used to flush pending frames from the peer's aggregation reorder
+ * buffer.
*
- * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have
- * changed, rate control algorithm can update its internal state if needed.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ra: the peer's destination address
+ * @tid: the TID of the aggregation session
+ * @ssn: the new starting sequence number for the receiver
*/
-enum rate_control_changed {
- IEEE80211_RC_HT_CHANGED = BIT(0)
-};
+void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
+
+/* Rate control API */
/**
* struct ieee80211_tx_rate_control - rate control information for/from RC algo
@@ -2673,6 +4532,8 @@ enum rate_control_changed {
* @hw: The hardware the algorithm is invoked for.
* @sband: The band this frame is being transmitted on.
* @bss_conf: the current BSS configuration
+ * @skb: the skb that will be transmitted, the control information in it needs
+ * to be filled in
* @reported_rate: The rate control algorithm can fill this in to indicate
* which rate should be reported to userspace as the current rate and
* used for rate calculations in the mesh network.
@@ -2680,12 +4541,11 @@ enum rate_control_changed {
* RTS threshold
* @short_preamble: whether mac80211 will request short-preamble transmission
* if the selected rate supports it
- * @max_rate_idx: user-requested maximum rate (not MCS for now)
+ * @max_rate_idx: user-requested maximum (legacy) rate
* (deprecated; this will be removed once drivers get updated to use
* rate_idx_mask)
- * @rate_idx_mask: user-requested rate mask (not MCS for now)
- * @skb: the skb that will be transmitted, the control information in it needs
- * to be filled in
+ * @rate_idx_mask: user-requested (legacy) rate mask
+ * @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use)
* @bss: whether this frame is sent out in AP or IBSS mode
*/
struct ieee80211_tx_rate_control {
@@ -2697,22 +4557,23 @@ struct ieee80211_tx_rate_control {
bool rts, short_preamble;
u8 max_rate_idx;
u32 rate_idx_mask;
+ u8 *rate_idx_mcs_mask;
bool bss;
};
struct rate_control_ops {
- struct module *module;
const char *name;
void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
void (*free)(void *priv);
void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta);
void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta,
- void *priv_sta, u32 changed,
- enum nl80211_channel_type oper_chan_type);
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed);
void (*free_sta)(void *priv, struct ieee80211_sta *sta,
void *priv_sta);
@@ -2725,6 +4586,8 @@ struct rate_control_ops {
void (*add_sta_debugfs)(void *priv, void *priv_sta,
struct dentry *dir);
void (*remove_sta_debugfs)(void *priv, void *priv_sta);
+
+ u32 (*get_expected_throughput)(void *priv_sta);
};
static inline int rate_supported(struct ieee80211_sta *sta,
@@ -2768,8 +4631,9 @@ rate_lowest_index(struct ieee80211_supported_band *sband,
return i;
/* warn when we cannot find a rate. */
- WARN_ON(1);
+ WARN_ON_ONCE(1);
+ /* and return 0 (the lowest index) */
return 0;
}
@@ -2785,37 +4649,57 @@ bool rate_usable_index_exists(struct ieee80211_supported_band *sband,
return false;
}
-int ieee80211_rate_control_register(struct rate_control_ops *ops);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
+/**
+ * rate_control_set_rates - pass the sta rate selection to mac80211/driver
+ *
+ * When not doing a rate control probe to test rates, rate control should pass
+ * its rate selection to mac80211. If the driver supports receiving a station
+ * rate table, it will use it to ensure that frames are always sent based on
+ * the most recent rate control module decision.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @pubsta: &struct ieee80211_sta pointer to the target destination.
+ * @rates: new tx rate set to be used for this station.
+ */
+int rate_control_set_rates(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_sta_rates *rates);
+
+int ieee80211_rate_control_register(const struct rate_control_ops *ops);
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops);
static inline bool
conf_is_ht20(struct ieee80211_conf *conf)
{
- return conf->channel_type == NL80211_CHAN_HT20;
+ return conf->chandef.width == NL80211_CHAN_WIDTH_20;
}
static inline bool
conf_is_ht40_minus(struct ieee80211_conf *conf)
{
- return conf->channel_type == NL80211_CHAN_HT40MINUS;
+ return conf->chandef.width == NL80211_CHAN_WIDTH_40 &&
+ conf->chandef.center_freq1 < conf->chandef.chan->center_freq;
}
static inline bool
conf_is_ht40_plus(struct ieee80211_conf *conf)
{
- return conf->channel_type == NL80211_CHAN_HT40PLUS;
+ return conf->chandef.width == NL80211_CHAN_WIDTH_40 &&
+ conf->chandef.center_freq1 > conf->chandef.chan->center_freq;
}
static inline bool
conf_is_ht40(struct ieee80211_conf *conf)
{
- return conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf);
+ return conf->chandef.width == NL80211_CHAN_WIDTH_40;
}
static inline bool
conf_is_ht(struct ieee80211_conf *conf)
{
- return conf->channel_type != NL80211_CHAN_NO_HT;
+ return (conf->chandef.width != NL80211_CHAN_WIDTH_5) &&
+ (conf->chandef.width != NL80211_CHAN_WIDTH_10) &&
+ (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT);
}
static inline enum nl80211_iftype
@@ -2840,4 +4724,95 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
return ieee80211_iftype_p2p(vif->type, vif->p2p);
}
+void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
+ int rssi_min_thold,
+ int rssi_max_thold);
+
+void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_ave_rssi - report the average RSSI for the specified interface
+ *
+ * @vif: the specified virtual interface
+ *
+ * Note: This function assumes that the given vif is valid.
+ *
+ * Return: The average RSSI value for the requested interface, or 0 if not
+ * applicable.
+ */
+int ieee80211_ave_rssi(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
+ * @vif: virtual interface
+ * @wakeup: wakeup reason(s)
+ * @gfp: allocation flags
+ *
+ * See cfg80211_report_wowlan_wakeup().
+ */
+void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
+ struct cfg80211_wowlan_wakeup *wakeup,
+ gfp_t gfp);
+
+/**
+ * ieee80211_tx_prepare_skb - prepare an 802.11 skb for transmission
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @vif: virtual interface
+ * @skb: frame to be sent from within the driver
+ * @band: the band to transmit on
+ * @sta: optional pointer to get the station to send the frame to
+ *
+ * Note: must be called under RCU lock
+ */
+bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct sk_buff *skb,
+ int band, struct ieee80211_sta **sta);
+
+/**
+ * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state
+ *
+ * @next_tsf: TSF timestamp of the next absent state change
+ * @has_next_tsf: next absent state change event pending
+ *
+ * @absent: descriptor bitmask, set if GO is currently absent
+ *
+ * private:
+ *
+ * @count: count fields from the NoA descriptors
+ * @desc: adjusted data from the NoA
+ */
+struct ieee80211_noa_data {
+ u32 next_tsf;
+ bool has_next_tsf;
+
+ u8 absent;
+
+ u8 count[IEEE80211_P2P_NOA_DESC_MAX];
+ struct {
+ u32 start;
+ u32 duration;
+ u32 interval;
+ } desc[IEEE80211_P2P_NOA_DESC_MAX];
+};
+
+/**
+ * ieee80211_parse_p2p_noa - initialize NoA tracking data from P2P IE
+ *
+ * @attr: P2P NoA IE
+ * @data: NoA tracking data
+ * @tsf: current TSF timestamp
+ *
+ * Return: number of successfully parsed descriptors
+ */
+int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
+ struct ieee80211_noa_data *data, u32 tsf);
+
+/**
+ * ieee80211_update_p2p_noa - get next pending P2P GO absent state change
+ *
+ * @data: NoA tracking data
+ * @tsf: current TSF timestamp
+ */
+void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
+
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
new file mode 100644
index 00000000000..a591053cae6
--- /dev/null
+++ b/include/net/mac802154.h
@@ -0,0 +1,179 @@
+/*
+ * IEEE802.15.4-2003 specification
+ *
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef NET_MAC802154_H
+#define NET_MAC802154_H
+
+#include <net/af_ieee802154.h>
+#include <linux/skbuff.h>
+
+/* General MAC frame format:
+ * 2 bytes: Frame Control
+ * 1 byte: Sequence Number
+ * 20 bytes: Addressing fields
+ * 14 bytes: Auxiliary Security Header
+ */
+#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
+
+/* The following flags are used to indicate changed address settings from
+ * the stack to the hardware.
+ */
+
+/* indicates that the Short Address changed */
+#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001
+/* indicates that the IEEE Address changed */
+#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002
+/* indicates that the PAN ID changed */
+#define IEEE802515_AFILT_PANID_CHANGED 0x00000004
+/* indicates that PAN Coordinator status changed */
+#define IEEE802515_AFILT_PANC_CHANGED 0x00000008
+
+struct ieee802154_hw_addr_filt {
+ __le16 pan_id; /* Each independent PAN selects a unique
+ * identifier. This PAN id allows communication
+ * between devices within a network using short
+ * addresses and enables transmissions between
+ * devices across independent networks.
+ */
+ __le16 short_addr;
+ __le64 ieee_addr;
+ u8 pan_coord;
+};
+
+struct ieee802154_dev {
+ /* filled by the driver */
+ int extra_tx_headroom;
+ u32 flags;
+ struct device *parent;
+
+ /* filled by mac802154 core */
+ struct ieee802154_hw_addr_filt hw_filt;
+ void *priv;
+ struct wpan_phy *phy;
+};
+
+/* Checksum is in hardware and is omitted from a packet
+ *
+ * The following flags are used to indicate hardware capabilities to
+ * the stack. Generally, these flags should be defined so that the
+ * simplest hardware does not need to set any of them. There are some
+ * exceptions to this rule, however, so you are advised to review these
+ * flags carefully.
+ */
+
+/* Indicates that the receiver omits the FCS and the transmitter adds it. */
+#define IEEE802154_HW_OMIT_CKSUM 0x00000001
+/* Indicates that receiver will autorespond with ACK frames. */
+#define IEEE802154_HW_AACK 0x00000002
+
+/* struct ieee802154_ops - callbacks from mac802154 to the driver
+ *
+ * This structure contains various callbacks that the driver may
+ * handle or, in some cases, must handle, for example to transmit
+ * a frame.
+ *
+ * start: Handler that 802.15.4 module calls for device initialization.
+ * This function is called before the first interface is attached.
+ *
+ * stop: Handler that 802.15.4 module calls for device cleanup.
+ * This function is called after the last interface is removed.
+ *
+ * xmit: Handler that 802.15.4 module calls for each transmitted frame.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
+ * The low-level driver should send the frame based on available
+ * configuration.
+ * This function should return zero or negative errno. Called with
+ * pib_lock held.
+ *
+ * ed: Handler that 802.15.4 module calls for Energy Detection.
+ * This function should place the value for detected energy
+ * (usually device-dependent) in the level pointer and return
+ * either zero or negative errno. Called with pib_lock held.
+ *
+ * set_channel:
+ * Set the device for listening on the specified channel.
+ * Returns either zero, or negative errno. Called with pib_lock held.
+ *
+ * set_hw_addr_filt:
+ * Set the device's hardware address filter for the specified addresses.
+ * Returns either zero, or negative errno.
+ *
+ * set_txpower:
+ * Set radio transmit power in dB. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_lbt:
+ * Enables or disables listen before talk on the device. Called with
+ * pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_mode:
+ * Sets the CCA mode used by the device. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_ed_level:
+ * Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ * held.
+ * Returns either zero, or negative errno.
+ *
+ * set_csma_params:
+ * Sets the CSMA parameter set for the PHY. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_frame_retries:
+ * Sets the retransmission attempt limit. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ */
+struct ieee802154_ops {
+ struct module *owner;
+ int (*start)(struct ieee802154_dev *dev);
+ void (*stop)(struct ieee802154_dev *dev);
+ int (*xmit)(struct ieee802154_dev *dev,
+ struct sk_buff *skb);
+ int (*ed)(struct ieee802154_dev *dev, u8 *level);
+ int (*set_channel)(struct ieee802154_dev *dev,
+ int page,
+ int channel);
+ int (*set_hw_addr_filt)(struct ieee802154_dev *dev,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed);
+ int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr);
+ int (*set_txpower)(struct ieee802154_dev *dev, int db);
+ int (*set_lbt)(struct ieee802154_dev *dev, bool on);
+ int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode);
+ int (*set_cca_ed_level)(struct ieee802154_dev *dev,
+ s32 level);
+ int (*set_csma_params)(struct ieee802154_dev *dev,
+ u8 min_be, u8 max_be, u8 retries);
+ int (*set_frame_retries)(struct ieee802154_dev *dev,
+ s8 retries);
+};
+
+/* Basic interface to register ieee802154 device */
+struct ieee802154_dev *
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
+void ieee802154_free_device(struct ieee802154_dev *dev);
+int ieee802154_register_device(struct ieee802154_dev *dev);
+void ieee802154_unregister_device(struct ieee802154_dev *dev);
+
+void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb,
+ u8 lqi);
+
+#endif /* NET_MAC802154_H */
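A registration sketch, not part of this patch: how a hypothetical radio driver could plug into the interface above. All foo_* names are invented and the callbacks are stubs.

#include <linux/module.h>
#include <net/mac802154.h>

static int foo_start(struct ieee802154_dev *dev) { return 0; }
static void foo_stop(struct ieee802154_dev *dev) { }
static int foo_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) { return 0; }
static int foo_ed(struct ieee802154_dev *dev, u8 *level) { *level = 0; return 0; }
static int foo_set_channel(struct ieee802154_dev *dev, int page, int channel)
{
        return 0;
}

static struct ieee802154_ops foo_ops = {
        .owner          = THIS_MODULE,
        .start          = foo_start,
        .stop           = foo_stop,
        .xmit           = foo_xmit,
        .ed             = foo_ed,
        .set_channel    = foo_set_channel,
};

static int foo_register(void)
{
        struct ieee802154_dev *dev;
        int ret;

        dev = ieee802154_alloc_device(0, &foo_ops);
        if (!dev)
                return -ENOMEM;

        dev->extra_tx_headroom = 0;
        dev->flags = IEEE802154_HW_OMIT_CKSUM;  /* hardware strips/adds the FCS */

        ret = ieee802154_register_device(dev);
        if (ret)
                ieee802154_free_device(dev);
        return ret;
}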
diff --git a/include/net/mip6.h b/include/net/mip6.h
index 26ba99b5a4b..0386b618908 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Authors:
diff --git a/include/net/mld.h b/include/net/mld.h
index 467143cd4e2..faa1d161bf2 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -63,13 +63,48 @@ struct mld2_query {
#define mld2q_mrc mld2q_hdr.icmp6_maxdelay
#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1]
-/* Max Response Code */
-#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
-#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
- ((value) < (thresh) ? (value) : \
- ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
- (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
-
-#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
+/* RFC3810, 5.1.3. Maximum Response Code:
+ *
+ * If Maximum Response Code >= 32768, Maximum Response Code represents a
+ * floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_MRC_EXP(value) (((value) >> 12) & 0x0007)
+#define MLDV2_MRC_MAN(value) ((value) & 0x0fff)
+
+/* RFC3810, 5.1.9. QQIC (Querier's Query Interval Code):
+ *
+ * If QQIC >= 128, QQIC represents a floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
+#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+
+static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
+{
+ /* RFC3810, 5.1.3. Maximum Response Code */
+ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
+
+ if (mc_mrc < 32768) {
+ ret = mc_mrc;
+ } else {
+ unsigned long mc_man, mc_exp;
+
+ mc_exp = MLDV2_MRC_EXP(mc_mrc);
+ mc_man = MLDV2_MRC_MAN(mc_mrc);
+
+ ret = (mc_man | 0x1000) << (mc_exp + 3);
+ }
+
+ return ret;
+}
#endif
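The hunk above only adds an inline decoder for the Maximum Response Code; a matching decoder for QQIC using the MLDV2_QQIC_* macros would follow the same RFC 3810 rule. This is a sketch, not part of this patch, and mldv2_qqic() is an invented name.

static inline unsigned long mldv2_qqic(u8 qqic)
{
        /* RFC3810, 5.1.9: QQI equals QQIC when QQIC < 128,
         * otherwise QQI = (mant | 0x10) << (exp + 3).
         */
        if (qqic < 128)
                return qqic;

        return (MLDV2_QQIC_MAN(qqic) | 0x10) << (MLDV2_QQIC_EXP(qqic) + 3);
}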
diff --git a/include/net/mrp.h b/include/net/mrp.h
new file mode 100644
index 00000000000..31912c3be77
--- /dev/null
+++ b/include/net/mrp.h
@@ -0,0 +1,142 @@
+#ifndef _NET_MRP_H
+#define _NET_MRP_H
+
+#define MRP_END_MARK 0x0
+
+struct mrp_pdu_hdr {
+ u8 version;
+};
+
+struct mrp_msg_hdr {
+ u8 attrtype;
+ u8 attrlen;
+};
+
+struct mrp_vecattr_hdr {
+ __be16 lenflags;
+ unsigned char firstattrvalue[];
+#define MRP_VECATTR_HDR_LEN_MASK cpu_to_be16(0x1FFF)
+#define MRP_VECATTR_HDR_FLAG_LA cpu_to_be16(0x2000)
+};
+
+enum mrp_vecattr_event {
+ MRP_VECATTR_EVENT_NEW,
+ MRP_VECATTR_EVENT_JOIN_IN,
+ MRP_VECATTR_EVENT_IN,
+ MRP_VECATTR_EVENT_JOIN_MT,
+ MRP_VECATTR_EVENT_MT,
+ MRP_VECATTR_EVENT_LV,
+ __MRP_VECATTR_EVENT_MAX
+};
+
+struct mrp_skb_cb {
+ struct mrp_msg_hdr *mh;
+ struct mrp_vecattr_hdr *vah;
+ unsigned char attrvalue[];
+};
+
+static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
+ FIELD_SIZEOF(struct sk_buff, cb));
+ return (struct mrp_skb_cb *)skb->cb;
+}
+
+enum mrp_applicant_state {
+ MRP_APPLICANT_INVALID,
+ MRP_APPLICANT_VO,
+ MRP_APPLICANT_VP,
+ MRP_APPLICANT_VN,
+ MRP_APPLICANT_AN,
+ MRP_APPLICANT_AA,
+ MRP_APPLICANT_QA,
+ MRP_APPLICANT_LA,
+ MRP_APPLICANT_AO,
+ MRP_APPLICANT_QO,
+ MRP_APPLICANT_AP,
+ MRP_APPLICANT_QP,
+ __MRP_APPLICANT_MAX
+};
+#define MRP_APPLICANT_MAX (__MRP_APPLICANT_MAX - 1)
+
+enum mrp_event {
+ MRP_EVENT_NEW,
+ MRP_EVENT_JOIN,
+ MRP_EVENT_LV,
+ MRP_EVENT_TX,
+ MRP_EVENT_R_NEW,
+ MRP_EVENT_R_JOIN_IN,
+ MRP_EVENT_R_IN,
+ MRP_EVENT_R_JOIN_MT,
+ MRP_EVENT_R_MT,
+ MRP_EVENT_R_LV,
+ MRP_EVENT_R_LA,
+ MRP_EVENT_REDECLARE,
+ MRP_EVENT_PERIODIC,
+ __MRP_EVENT_MAX
+};
+#define MRP_EVENT_MAX (__MRP_EVENT_MAX - 1)
+
+enum mrp_tx_action {
+ MRP_TX_ACTION_NONE,
+ MRP_TX_ACTION_S_NEW,
+ MRP_TX_ACTION_S_JOIN_IN,
+ MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+ MRP_TX_ACTION_S_IN_OPTIONAL,
+ MRP_TX_ACTION_S_LV,
+};
+
+struct mrp_attr {
+ struct rb_node node;
+ enum mrp_applicant_state state;
+ u8 type;
+ u8 len;
+ unsigned char value[];
+};
+
+enum mrp_applications {
+ MRP_APPLICATION_MVRP,
+ __MRP_APPLICATION_MAX
+};
+#define MRP_APPLICATION_MAX (__MRP_APPLICATION_MAX - 1)
+
+struct mrp_application {
+ enum mrp_applications type;
+ unsigned int maxattr;
+ struct packet_type pkttype;
+ unsigned char group_address[ETH_ALEN];
+ u8 version;
+};
+
+struct mrp_applicant {
+ struct mrp_application *app;
+ struct net_device *dev;
+ struct timer_list join_timer;
+ struct timer_list periodic_timer;
+
+ spinlock_t lock;
+ struct sk_buff_head queue;
+ struct sk_buff *pdu;
+ struct rb_root mad;
+ struct rcu_head rcu;
+};
+
+struct mrp_port {
+ struct mrp_applicant __rcu *applicants[MRP_APPLICATION_MAX + 1];
+ struct rcu_head rcu;
+};
+
+int mrp_register_application(struct mrp_application *app);
+void mrp_unregister_application(struct mrp_application *app);
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
+
+int mrp_request_join(const struct net_device *dev,
+ const struct mrp_application *app,
+ const void *value, u8 len, u8 type);
+void mrp_request_leave(const struct net_device *dev,
+ const struct mrp_application *app,
+ const void *value, u8 len, u8 type);
+
+#endif /* _NET_MRP_H */
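A registration sketch, not part of this patch: how an MRP application such as MVRP might describe itself to this core. The foo_* names are invented; the EtherType and group MAC address shown are the MVRP values from IEEE 802.1Q, but treat the exact field values here as illustrative.

static struct mrp_application foo_mvrp_app = {
        .type           = MRP_APPLICATION_MVRP,
        .maxattr        = 1,                            /* e.g. one VID attribute type */
        .pkttype.type   = cpu_to_be16(0x88F5),          /* assumed MVRP EtherType */
        .group_address  = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
        .version        = 0,
};

static int __init foo_mvrp_init(void)
{
        return mrp_register_application(&foo_mvrp_app);
}

static void __exit foo_mvrp_exit(void)
{
        mrp_unregister_application(&foo_mvrp_app);
}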
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index e0e594f8e9d..6bbda34d5e5 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -34,6 +34,7 @@ enum {
__ND_OPT_ARRAY_MAX,
ND_OPT_ROUTE_INFO = 24, /* RFC4191 */
ND_OPT_RDNSS = 25, /* RFC5006 */
+ ND_OPT_DNSSL = 31, /* RFC6106 */
__ND_OPT_MAX
};
@@ -42,12 +43,13 @@ enum {
#define ND_REACHABLE_TIME (30*HZ)
#define ND_RETRANS_TIMER HZ
-#ifdef __KERNEL__
-
#include <linux/compiler.h>
#include <linux/icmpv6.h>
#include <linux/in6.h>
#include <linux/types.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/hash.h>
#include <net/neighbour.h>
@@ -76,87 +78,162 @@ struct ra_msg {
__be32 retrans_timer;
};
+struct rd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ struct in6_addr dest;
+ __u8 opt[0];
+};
+
struct nd_opt_hdr {
__u8 nd_opt_type;
__u8 nd_opt_len;
} __packed;
+/* ND options */
+struct ndisc_options {
+ struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
+#ifdef CONFIG_IPV6_ROUTE_INFO
+ struct nd_opt_hdr *nd_opts_ri;
+ struct nd_opt_hdr *nd_opts_ri_end;
+#endif
+ struct nd_opt_hdr *nd_useropts;
+ struct nd_opt_hdr *nd_useropts_end;
+};
-extern int ndisc_init(void);
+#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
+#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
+#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
+#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
+#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
+#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
-extern void ndisc_cleanup(void);
+#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
-extern int ndisc_rcv(struct sk_buff *skb);
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+ struct ndisc_options *ndopts);
-extern void ndisc_send_ns(struct net_device *dev,
- struct neighbour *neigh,
- const struct in6_addr *solicit,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr);
+/*
+ * Return the padding between the option length and the start of the
+ * link addr. Currently only IP-over-InfiniBand needs this, although
+ * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
+ * also need a pad of 2.
+ */
+static inline int ndisc_addr_option_pad(unsigned short type)
+{
+ switch (type) {
+ case ARPHRD_INFINIBAND: return 2;
+ default: return 0;
+ }
+}
-extern void ndisc_send_rs(struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr);
+static inline int ndisc_opt_addr_space(struct net_device *dev)
+{
+ return NDISC_OPT_SPACE(dev->addr_len +
+ ndisc_addr_option_pad(dev->type));
+}
-extern void ndisc_send_redirect(struct sk_buff *skb,
- struct neighbour *neigh,
- const struct in6_addr *target);
+static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
+ struct net_device *dev)
+{
+ u8 *lladdr = (u8 *)(p + 1);
+ int lladdrlen = p->nd_opt_len << 3;
+ int prepad = ndisc_addr_option_pad(dev->type);
+ if (lladdrlen != ndisc_opt_addr_space(dev))
+ return NULL;
+ return lladdr + prepad;
+}
-extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir);
+static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
+{
+ const u32 *p32 = pkey;
-extern struct sk_buff *ndisc_build_skb(struct net_device *dev,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr,
- struct icmp6hdr *icmp6h,
- const struct in6_addr *target,
- int llinfo);
+ return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
+ (p32[1] * hash_rnd[1]) +
+ (p32[2] * hash_rnd[2]) +
+ (p32[3] * hash_rnd[3]));
+}
-extern void ndisc_send_skb(struct sk_buff *skb,
- struct net_device *dev,
- struct neighbour *neigh,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr,
- struct icmp6hdr *icmp6h);
+static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
+{
+ struct neigh_hash_table *nht;
+ const u32 *p32 = pkey;
+ struct neighbour *n;
+ u32 hash_val;
+
+ nht = rcu_dereference_bh(nd_tbl.nht);
+ hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
+ u32 *n32 = (u32 *) n->primary_key;
+ if (n->dev == dev &&
+ ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+ (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0)
+ return n;
+ }
+
+ return NULL;
+}
+static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
+{
+ struct neighbour *n;
+
+ rcu_read_lock_bh();
+ n = __ipv6_neigh_lookup_noref(dev, pkey);
+ if (n && !atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
+ rcu_read_unlock_bh();
+
+ return n;
+}
+
+int ndisc_init(void);
+int ndisc_late_init(void);
+
+void ndisc_late_cleanup(void);
+void ndisc_cleanup(void);
+
+int ndisc_rcv(struct sk_buff *skb);
+
+void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *solicit,
+ const struct in6_addr *daddr, const struct in6_addr *saddr);
+
+void ndisc_send_rs(struct net_device *dev,
+ const struct in6_addr *saddr, const struct in6_addr *daddr);
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
+
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
+ int dir);
/*
* IGMP
*/
-extern int igmp6_init(void);
+int igmp6_init(void);
-extern void igmp6_cleanup(void);
+void igmp6_cleanup(void);
-extern int igmp6_event_query(struct sk_buff *skb);
+int igmp6_event_query(struct sk_buff *skb);
-extern int igmp6_event_report(struct sk_buff *skb);
+int igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
-extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
- int write,
- void __user *buffer,
- size_t *lenp,
- loff_t *ppos);
-int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl,
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
#endif
-extern void inet6_ifinfo_notify(int event,
- struct inet6_dev *idev);
-
-static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr)
-{
-
- if (dev)
- return __neigh_lookup_errno(&nd_tbl, addr, dev);
-
- return ERR_PTR(-ENODEV);
-}
-
-
-#endif /* __KERNEL__ */
-
+void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
#endif
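A small sketch, not part of this patch, of the option-parsing helpers above: pulling the source link-layer address out of a received NS/RS. foo_get_src_lladdr() is an invented name and error handling is minimal.

static u8 *foo_get_src_lladdr(struct net_device *dev, u8 *opt, int opt_len)
{
        struct ndisc_options ndopts;

        if (!ndisc_parse_options(opt, opt_len, &ndopts))
                return NULL;            /* malformed options */

        if (!ndopts.nd_opts_src_lladdr)
                return NULL;            /* no source LL address option */

        /* Validates the option length and skips the per-link-type padding. */
        return ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
}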
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 4014b623880..47f425464f8 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -16,11 +16,12 @@
* - Add neighbour cache statistics like rtstat
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
@@ -37,6 +38,32 @@
struct neighbour;
+enum {
+ NEIGH_VAR_MCAST_PROBES,
+ NEIGH_VAR_UCAST_PROBES,
+ NEIGH_VAR_APP_PROBES,
+ NEIGH_VAR_RETRANS_TIME,
+ NEIGH_VAR_BASE_REACHABLE_TIME,
+ NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_GC_STALETIME,
+ NEIGH_VAR_QUEUE_LEN_BYTES,
+ NEIGH_VAR_PROXY_QLEN,
+ NEIGH_VAR_ANYCAST_DELAY,
+ NEIGH_VAR_PROXY_DELAY,
+ NEIGH_VAR_LOCKTIME,
+#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
+ /* Following are used as a second way to access one of the above */
+ NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
+ NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
+ /* Following are used by "default" only */
+ NEIGH_VAR_GC_INTERVAL,
+ NEIGH_VAR_GC_THRESH1,
+ NEIGH_VAR_GC_THRESH2,
+ NEIGH_VAR_GC_THRESH3,
+ NEIGH_VAR_MAX
+};
+
struct neigh_parms {
#ifdef CONFIG_NET_NS
struct net *net;
@@ -53,22 +80,35 @@ struct neigh_parms {
atomic_t refcnt;
struct rcu_head rcu_head;
- int base_reachable_time;
- int retrans_time;
- int gc_staletime;
int reachable_time;
- int delay_probe_time;
-
- int queue_len;
- int ucast_probes;
- int app_probes;
- int mcast_probes;
- int anycast_delay;
- int proxy_delay;
- int proxy_qlen;
- int locktime;
+ int data[NEIGH_VAR_DATA_MAX];
+ DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
+static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
+{
+ set_bit(index, p->data_state);
+ p->data[index] = val;
+}
+
+#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
+
+/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
+ * In other cases, NEIGH_VAR_SET should be used.
+ */
+#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
+#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
+
+static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
+{
+ bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
+}
+
+static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
+{
+ bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
+}
+
struct neigh_statistics {
unsigned long allocs; /* number of allocated neighs */
unsigned long destroys; /* number of destroyed neighs */
@@ -99,6 +139,7 @@ struct neighbour {
rwlock_t lock;
atomic_t refcnt;
struct sk_buff_head arp_queue;
+ unsigned int arp_queue_len_bytes;
struct timer_list timer;
unsigned long used;
atomic_t probes;
@@ -108,8 +149,8 @@ struct neighbour {
__u8 dead;
seqlock_t ha_lock;
unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
- struct hh_cache *hh;
- int (*output)(struct sk_buff *skb);
+ struct hh_cache hh;
+ int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct rcu_head rcu;
struct net_device *dev;
@@ -118,12 +159,10 @@ struct neighbour {
struct neigh_ops {
int family;
- void (*solicit)(struct neighbour *, struct sk_buff*);
- void (*error_report)(struct neighbour *, struct sk_buff*);
- int (*output)(struct sk_buff*);
- int (*connected_output)(struct sk_buff*);
- int (*hh_output)(struct sk_buff*);
- int (*queue_xmit)(struct sk_buff*);
+ void (*solicit)(struct neighbour *, struct sk_buff *);
+ void (*error_report)(struct neighbour *, struct sk_buff *);
+ int (*output)(struct neighbour *, struct sk_buff *);
+ int (*connected_output)(struct neighbour *, struct sk_buff *);
};
struct pneigh_entry {
@@ -140,10 +179,12 @@ struct pneigh_entry {
* neighbour table manipulation
*/
+#define NEIGH_NUM_HASH_RND 4
+
struct neigh_hash_table {
struct neighbour __rcu **hash_buckets;
- unsigned int hash_mask;
- __u32 hash_rnd;
+ unsigned int hash_shift;
+ __u32 hash_rnd[NEIGH_NUM_HASH_RND];
struct rcu_head rcu;
};
@@ -155,14 +196,13 @@ struct neigh_table {
int key_len;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
- __u32 hash_rnd);
+ __u32 *hash_rnd);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
- /* HACK. gc_* should follow parms without a gap! */
int gc_interval;
int gc_thresh1;
int gc_thresh2;
@@ -174,12 +214,24 @@ struct neigh_table {
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
- struct kmem_cache *kmem_cachep;
struct neigh_statistics __percpu *stats;
struct neigh_hash_table __rcu *nht;
struct pneigh_entry **phash_buckets;
};
+static inline int neigh_parms_family(struct neigh_parms *p)
+{
+ return p->tbl->family;
+}
+
+#define NEIGH_PRIV_ALIGN sizeof(long long)
+#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
+
+static inline void *neighbour_priv(const struct neighbour *n)
+{
+ return (char *)n + n->tbl->entry_size;
+}
+
/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
@@ -187,61 +239,68 @@ struct neigh_table {
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
-extern void neigh_table_init(struct neigh_table *tbl);
-extern void neigh_table_init_no_netlink(struct neigh_table *tbl);
-extern int neigh_table_clear(struct neigh_table *tbl);
-extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
- const void *pkey,
- struct net_device *dev);
-extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
- struct net *net,
- const void *pkey);
-extern struct neighbour * neigh_create(struct neigh_table *tbl,
+void neigh_table_init(struct neigh_table *tbl);
+int neigh_table_clear(struct neigh_table *tbl);
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev);
+struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+ const void *pkey);
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref);
+static inline struct neighbour *neigh_create(struct neigh_table *tbl,
const void *pkey,
- struct net_device *dev);
-extern void neigh_destroy(struct neighbour *neigh);
-extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- u32 flags);
-extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
-extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
-extern int neigh_resolve_output(struct sk_buff *skb);
-extern int neigh_connected_output(struct sk_buff *skb);
-extern int neigh_compat_output(struct sk_buff *skb);
-extern struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+ struct net_device *dev)
+{
+ return __neigh_create(tbl, pkey, dev, true);
+}
+void neigh_destroy(struct neighbour *neigh);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+void __neigh_set_probe_once(struct neighbour *neigh);
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev);
-extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl);
-extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+ struct neigh_table *tbl);
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
static inline
-struct net *neigh_parms_net(const struct neigh_parms *parms)
+struct net *neigh_parms_net(const struct neigh_parms *parms)
{
return read_pnet(&parms->net);
}
-extern unsigned long neigh_rand_reach_time(unsigned long base);
+unsigned long neigh_rand_reach_time(unsigned long base);
-extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
- struct sk_buff *skb);
-extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat);
-extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
- struct net *net,
- const void *key,
- struct net_device *dev);
-extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev);
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb);
+struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev,
+ int creat);
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev);
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
+ struct net_device *dev);
-static inline
-struct net *pneigh_net(const struct pneigh_entry *pneigh)
+static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
return read_pnet(&pneigh->net);
}
-extern void neigh_app_ns(struct neighbour *n);
-extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
-extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
-extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
+void neigh_app_ns(struct neighbour *n);
+void neigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct neighbour *, void *), void *cookie);
+void __neigh_for_each_release(struct neigh_table *tbl,
+ int (*cb)(struct neighbour *));
+void pneigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct seq_net_private p;
@@ -255,15 +314,23 @@ struct neigh_seq_state {
#define NEIGH_SEQ_IS_PNEIGH 0x00000002
#define NEIGH_SEQ_SKIP_NOARP 0x00000004
};
-extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
-extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
-extern void neigh_seq_stop(struct seq_file *, void *);
-
-extern int neigh_sysctl_register(struct net_device *dev,
- struct neigh_parms *p,
- char *p_name,
- proc_handler *proc_handler);
-extern void neigh_sysctl_unregister(struct neigh_parms *p);
+void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
+ unsigned int);
+void *neigh_seq_next(struct seq_file *, void *, loff_t *);
+void neigh_seq_stop(struct seq_file *, void *);
+
+int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+ proc_handler *proc_handler);
+void neigh_sysctl_unregister(struct neigh_parms *p);
static inline void __neigh_parms_put(struct neigh_parms *parms)
{
@@ -295,12 +362,6 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
#define neigh_hold(n) atomic_inc(&(n)->refcnt)
-static inline void neigh_confirm(struct neighbour *neigh)
-{
- if (neigh)
- neigh->confirmed = jiffies;
-}
-
static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
unsigned long now = jiffies;
@@ -315,7 +376,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
#ifdef CONFIG_BRIDGE_NETFILTER
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
- unsigned seq, hh_alen;
+ unsigned int seq, hh_alen;
do {
seq = read_seqbegin(&hh->hh_lock);
@@ -326,22 +387,26 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
}
#endif
-static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
- unsigned seq;
+ unsigned int seq;
int hh_len;
do {
- int hh_alen;
-
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
- hh_alen = HH_DATA_ALIGN(hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ if (likely(hh_len <= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+ } else {
+ int hh_alen = HH_DATA_ALIGN(hh_len);
+
+ memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ }
} while (read_seqretry(&hh->hh_lock, seq));
skb_push(skb, hh_len);
- return hh->hh_output(skb);
+ return dev_queue_xmit(skb);
}
static inline struct neighbour *
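A sketch, not part of this patch, of how the new NEIGH_VAR accessors replace the removed per-field members of struct neigh_parms. foo_tune_parms() is an invented name.

static void foo_tune_parms(struct neigh_parms *p)
{
        /* Read a tunable through the indexed data[] array. */
        if (NEIGH_VAR(p, UCAST_PROBES) < 3)
                /* NEIGH_VAR_SET also marks the entry in data_state. */
                NEIGH_VAR_SET(p, UCAST_PROBES, 3);

        /* Drivers would use NEIGH_VAR_INIT instead inside ndo_neigh_setup. */
        NEIGH_VAR_SET(p, RETRANS_TIME, HZ / 10);
}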
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1bf812b21fb..361d2607719 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -4,37 +4,48 @@
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/list.h>
+#include <linux/sysctl.h>
+#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
+#include <net/netns/ieee802154_6lowpan.h>
+#include <net/netns/sctp.h>
#include <net/netns/dccp.h>
+#include <net/netns/netfilter.h>
#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
+#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct sock;
+struct netns_ipvs;
#define NETDEV_HASHBITS 8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
struct net {
+ atomic_t passive; /* To decide when the network
+ * namespace should be freed.
+ */
atomic_t count; /* To decided when the network
- * namespace should be freed.
+ * namespace should be shut down.
*/
#ifdef NETNS_REFCNT_DEBUG
atomic_t use_count; /* To track references we
@@ -47,6 +58,10 @@ struct net {
struct list_head cleanup_list; /* namespaces on death row */
struct list_head exit_list; /* Use only net_mutex */
+ struct user_namespace *user_ns; /* Owning user namespace */
+
+ unsigned int proc_inum;
+
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
@@ -60,6 +75,9 @@ struct net {
struct list_head dev_base_head;
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
+ unsigned int dev_base_seq; /* protected by rtnl_mutex */
+ int ifindex;
+ unsigned int dev_unreg_count;
/* core fib_rules */
struct list_head rules_ops;
@@ -71,17 +89,30 @@ struct net {
struct netns_packet packet;
struct netns_unix unx;
struct netns_ipv4 ipv4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netns_ipv6 ipv6;
#endif
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+ struct netns_ieee802154_lowpan ieee802154_lowpan;
+#endif
+#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+ struct netns_sctp sctp;
+#endif
#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
struct netns_dccp dccp;
#endif
#ifdef CONFIG_NETFILTER
+ struct netns_nf nf;
struct netns_xt xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct netns_ct ct;
#endif
+#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
+ struct netns_nftables nft;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ struct netns_nf_frag nf_frag;
+#endif
struct sock *nfnl;
struct sock *nfnl_stash;
#endif
@@ -94,32 +125,50 @@ struct net {
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
#endif
+#if IS_ENABLED(CONFIG_IP_VS)
+ struct netns_ipvs *ipvs;
+#endif
+ struct sock *diag_nlsk;
+ atomic_t fnhe_genid;
};
-
#include <linux/seq_file_net.h>
/* Init's network namespace */
extern struct net init_net;
-#ifdef CONFIG_NET
-extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns);
-
-#else /* CONFIG_NET */
-static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns)
+#ifdef CONFIG_NET_NS
+struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+ struct net *old_net);
+
+#else /* CONFIG_NET_NS */
+#include <linux/sched.h>
+#include <linux/nsproxy.h>
+static inline struct net *copy_net_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct net *old_net)
{
- /* There is nothing to copy so this is a noop */
- return net_ns;
+ if (flags & CLONE_NEWNET)
+ return ERR_PTR(-EINVAL);
+ return old_net;
}
-#endif /* CONFIG_NET */
+#endif /* CONFIG_NET_NS */
extern struct list_head net_namespace_list;
-extern struct net *get_net_ns_by_pid(pid_t pid);
+struct net *get_net_ns_by_pid(pid_t pid);
+struct net *get_net_ns_by_fd(int fd);
+
+#ifdef CONFIG_SYSCTL
+void ipx_register_sysctl(void);
+void ipx_unregister_sysctl(void);
+#else
+#define ipx_register_sysctl()
+#define ipx_unregister_sysctl()
+#endif
#ifdef CONFIG_NET_NS
-extern void __put_net(struct net *net);
+void __put_net(struct net *net);
static inline struct net *get_net(struct net *net)
{
@@ -150,6 +199,9 @@ int net_eq(const struct net *net1, const struct net *net2)
{
return net1 == net2;
}
+
+void net_drop_ns(void *);
+
#else
static inline struct net *get_net(struct net *net)
@@ -171,6 +223,8 @@ int net_eq(const struct net *net1, const struct net *net2)
{
return 1;
}
+
+#define net_drop_ns NULL
#endif
@@ -227,10 +281,12 @@ static inline struct net *read_pnet(struct net * const *pnet)
#define __net_init
#define __net_exit
#define __net_initdata
+#define __net_initconst
#else
#define __net_init __init
#define __net_exit __exit_refok
#define __net_initdata __initdata
+#define __net_initconst __initconst
#endif
struct pernet_operations {
@@ -261,19 +317,85 @@ struct pernet_operations {
* device which caused kernel oops, and panics during network
* namespace cleanup. So please don't get this wrong.
*/
-extern int register_pernet_subsys(struct pernet_operations *);
-extern void unregister_pernet_subsys(struct pernet_operations *);
-extern int register_pernet_device(struct pernet_operations *);
-extern void unregister_pernet_device(struct pernet_operations *);
+int register_pernet_subsys(struct pernet_operations *);
+void unregister_pernet_subsys(struct pernet_operations *);
+int register_pernet_device(struct pernet_operations *);
+void unregister_pernet_device(struct pernet_operations *);
-struct ctl_path;
struct ctl_table;
struct ctl_table_header;
-extern struct ctl_table_header *register_net_sysctl_table(struct net *net,
- const struct ctl_path *path, struct ctl_table *table);
-extern struct ctl_table_header *register_net_sysctl_rotable(
- const struct ctl_path *path, struct ctl_table *table);
-extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+#ifdef CONFIG_SYSCTL
+int net_sysctl_init(void);
+struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
+ struct ctl_table *table);
+void unregister_net_sysctl_table(struct ctl_table_header *header);
+#else
+static inline int net_sysctl_init(void) { return 0; }
+static inline struct ctl_table_header *register_net_sysctl(struct net *net,
+ const char *path, struct ctl_table *table)
+{
+ return NULL;
+}
+static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
+{
+}
+#endif
+
+static inline int rt_genid_ipv4(struct net *net)
+{
+ return atomic_read(&net->ipv4.rt_genid);
+}
+
+static inline void rt_genid_bump_ipv4(struct net *net)
+{
+ atomic_inc(&net->ipv4.rt_genid);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(struct net *net)
+{
+ return atomic_read(&net->ipv6.rt_genid);
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+ atomic_inc(&net->ipv6.rt_genid);
+}
+#else
+static inline int rt_genid_ipv6(struct net *net)
+{
+ return 0;
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+static inline struct netns_ieee802154_lowpan *
+net_ieee802154_lowpan(struct net *net)
+{
+ return &net->ieee802154_lowpan;
+}
+#endif
+
+/* For callers who don't really care about whether it's IPv4 or IPv6 */
+static inline void rt_genid_bump_all(struct net *net)
+{
+ rt_genid_bump_ipv4(net);
+ rt_genid_bump_ipv6(net);
+}
+
+static inline int fnhe_genid(struct net *net)
+{
+ return atomic_read(&net->fnhe_genid);
+}
+
+static inline void fnhe_genid_bump(struct net *net)
+{
+ atomic_inc(&net->fnhe_genid);
+}
#endif /* __NET_NET_NAMESPACE_H */
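A minimal per-namespace subsystem sketch, not part of this patch, using the registration interface above; the foo_* names are invented.

static int __net_init foo_net_init(struct net *net)
{
        /* allocate and initialise per-namespace state here */
        return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
        /* tear down per-namespace state here */
}

static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .exit = foo_net_exit,
};

static int __init foo_init(void)
{
        return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
        unregister_pernet_subsys(&foo_net_ops);
}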
diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h
new file mode 100644
index 00000000000..7727b4247da
--- /dev/null
+++ b/include/net/net_ratelimit.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_NET_RATELIMIT_H
+#define _LINUX_NET_RATELIMIT_H
+
+#include <linux/ratelimit.h>
+
+extern struct ratelimit_state net_ratelimit_state;
+
+#endif /* _LINUX_NET_RATELIMIT_H */
diff --git a/include/net/netevent.h b/include/net/netevent.h
index e82b7bab3ff..d8bbb38584b 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -10,24 +10,24 @@
*
* Changes:
*/
-#ifdef __KERNEL__
struct dst_entry;
+struct neighbour;
struct netevent_redirect {
struct dst_entry *old;
struct dst_entry *new;
+ struct neighbour *neigh;
+ const void *daddr;
};
enum netevent_notif_type {
NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
- NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */
NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
};
-extern int register_netevent_notifier(struct notifier_block *nb);
-extern int unregister_netevent_notifier(struct notifier_block *nb);
-extern int call_netevent_notifiers(unsigned long val, void *v);
+int register_netevent_notifier(struct notifier_block *nb);
+int unregister_netevent_notifier(struct notifier_block *nb);
+int call_netevent_notifiers(unsigned long val, void *v);
#endif
-#endif
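A consumer sketch, not part of this patch, for the notifier interface above; the foo_* names are invented and <linux/notifier.h> is assumed to be included.

static int foo_netevent_cb(struct notifier_block *nb,
                           unsigned long event, void *ptr)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                /* ptr is the updated struct neighbour */
                break;
        case NETEVENT_REDIRECT:
                /* ptr is a struct netevent_redirect */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block foo_netevent_nb = {
        .notifier_call = foo_netevent_cb,
};

/* registered once at init time:
 *      register_netevent_notifier(&foo_netevent_nb);
 */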
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 7573d52a434..981c327374d 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -16,9 +16,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
-extern int nf_conntrack_ipv4_compat_init(void);
-extern void nf_conntrack_ipv4_compat_fini(void);
-
-extern void need_ipv4_conntrack(void);
+int nf_conntrack_ipv4_compat_init(void);
+void nf_conntrack_ipv4_compat_fini(void);
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
index 6b00ea38546..f01ef208dff 100644
--- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -1,6 +1,6 @@
#ifndef _NF_DEFRAG_IPV4_H
#define _NF_DEFRAG_IPV4_H
-extern void nf_defrag_ipv4_enable(void);
+void nf_defrag_ipv4_enable(void);
#endif /* _NF_DEFRAG_IPV4_H */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
new file mode 100644
index 00000000000..931fbf81217
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -0,0 +1,128 @@
+#ifndef _IPV4_NF_REJECT_H
+#define _IPV4_NF_REJECT_H
+
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/dst.h>
+
+static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
+{
+ icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+}
+
+/* Send RST reply */
+static void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+ struct sk_buff *nskb;
+ const struct iphdr *oiph;
+ struct iphdr *niph;
+ const struct tcphdr *oth;
+ struct tcphdr _otcph, *tcph;
+
+ /* IP header checks: fragment. */
+ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+ return;
+
+ oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
+ sizeof(_otcph), &_otcph);
+ if (oth == NULL)
+ return;
+
+ /* No RST for RST. */
+ if (oth->rst)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+ return;
+
+ /* Check checksum */
+ if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
+ return;
+ oiph = ip_hdr(oldskb);
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+
+ skb_reset_network_header(nskb);
+ niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
+ niph->version = 4;
+ niph->ihl = sizeof(struct iphdr) / 4;
+ niph->tos = 0;
+ niph->id = 0;
+ niph->frag_off = htons(IP_DF);
+ niph->protocol = IPPROTO_TCP;
+ niph->check = 0;
+ niph->saddr = oiph->daddr;
+ niph->daddr = oiph->saddr;
+
+ skb_reset_transport_header(nskb);
+ tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ memset(tcph, 0, sizeof(*tcph));
+ tcph->source = oth->dest;
+ tcph->dest = oth->source;
+ tcph->doff = sizeof(struct tcphdr) / 4;
+
+ if (oth->ack)
+ tcph->seq = oth->ack_seq;
+ else {
+ tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+ oldskb->len - ip_hdrlen(oldskb) -
+ (oth->doff << 2));
+ tcph->ack = 1;
+ }
+
+ tcph->rst = 1;
+ tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+ niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)tcph - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ /* ip_route_me_harder expects skb->dst to be set */
+ skb_dst_set_noref(nskb, skb_dst(oldskb));
+
+ nskb->protocol = htons(ETH_P_IP);
+ if (ip_route_me_harder(nskb, RTN_UNSPEC))
+ goto free_nskb;
+
+ niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
+
+ /* "Never happens" */
+ if (nskb->len > dst_mtu(skb_dst(nskb)))
+ goto free_nskb;
+
+ nf_ct_attach(nskb, oldskb);
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ goto free_nskb;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip_local_out(nskb);
+
+ return;
+
+ free_nskb:
+ kfree_skb(nskb);
+}
+
+
+#endif /* _IPV4_NF_REJECT_H */
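A caller sketch, not part of this patch: how an IPv4 REJECT-style target might choose between the two helpers above. The function name and the with_tcp_reset flag are invented.

static unsigned int foo_reject(struct sk_buff *skb, int hooknum,
                               bool with_tcp_reset)
{
        if (with_tcp_reset)
                nf_send_reset(skb, hooknum);
        else
                nf_send_unreach(skb, ICMP_PORT_UNREACH);

        /* the offending packet is always dropped */
        return NF_DROP;
}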
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 1ee717eb5b0..a4c99368579 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -7,16 +7,6 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
- struct net_device *in,
- struct net_device *out,
- int (*okfn)(struct sk_buff *));
-
-struct inet_frags_ctl;
-
#include <linux/sysctl.h>
extern struct ctl_table nf_ct_ipv6_sysctl_table[];
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 94dd54d76b4..27666d8a0bd 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -1,6 +1,13 @@
#ifndef _NF_DEFRAG_IPV6_H
#define _NF_DEFRAG_IPV6_H
-extern void nf_defrag_ipv6_enable(void);
+void nf_defrag_ipv6_enable(void);
+
+int nf_ct_frag6_init(void);
+void nf_ct_frag6_cleanup(void);
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+void nf_ct_frag6_consume_orig(struct sk_buff *skb);
+
+struct inet_frags_ctl;
#endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
new file mode 100644
index 00000000000..710d17ed70b
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -0,0 +1,171 @@
+#ifndef _IPV6_NF_REJECT_H
+#define _IPV6_NF_REJECT_H
+
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_checksum.h>
+#include <linux/netfilter_ipv6.h>
+
+static inline void
+nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+ unsigned int hooknum)
+{
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+}
+
+/* Send RST reply */
+static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+{
+ struct sk_buff *nskb;
+ struct tcphdr otcph, *tcph;
+ unsigned int otcplen, hh_len;
+ int tcphoff, needs_ack;
+ const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+ struct ipv6hdr *ip6h;
+#define DEFAULT_TOS_VALUE 0x0U
+ const __u8 tclass = DEFAULT_TOS_VALUE;
+ struct dst_entry *dst = NULL;
+ u8 proto;
+ __be16 frag_off;
+ struct flowi6 fl6;
+
+ if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
+ (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
+ pr_debug("addr is not unicast.\n");
+ return;
+ }
+
+ proto = oip6h->nexthdr;
+ tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
+
+ if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
+ pr_debug("Cannot get TCP header.\n");
+ return;
+ }
+
+ otcplen = oldskb->len - tcphoff;
+
+ /* IP header checks: fragment, too short. */
+ if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
+ pr_debug("proto(%d) != IPPROTO_TCP, "
+ "or too short. otcplen = %d\n",
+ proto, otcplen);
+ return;
+ }
+
+ if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
+ BUG();
+
+ /* No RST for RST. */
+ if (otcph.rst) {
+ pr_debug("RST is set\n");
+ return;
+ }
+
+ /* Check checksum. */
+ if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
+ pr_debug("TCP checksum is invalid\n");
+ return;
+ }
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = oip6h->daddr;
+ fl6.daddr = oip6h->saddr;
+ fl6.fl6_sport = otcph.dest;
+ fl6.fl6_dport = otcph.source;
+ security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
+ return;
+ }
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ if (IS_ERR(dst))
+ return;
+
+ hh_len = (dst->dev->hard_header_len + 15)&~15;
+ nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+ + sizeof(struct tcphdr) + dst->trailer_len,
+ GFP_ATOMIC);
+
+ if (!nskb) {
+ net_dbg_ratelimited("cannot alloc skb\n");
+ dst_release(dst);
+ return;
+ }
+
+ skb_dst_set(nskb, dst);
+
+ skb_reserve(nskb, hh_len + dst->header_len);
+
+ skb_put(nskb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(nskb);
+ ip6h = ipv6_hdr(nskb);
+ ip6_flow_hdr(ip6h, tclass, 0);
+ ip6h->hop_limit = ip6_dst_hoplimit(dst);
+ ip6h->nexthdr = IPPROTO_TCP;
+ ip6h->saddr = oip6h->daddr;
+ ip6h->daddr = oip6h->saddr;
+
+ skb_reset_transport_header(nskb);
+ tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ /* Truncate to length (no data) */
+ tcph->doff = sizeof(struct tcphdr)/4;
+ tcph->source = otcph.dest;
+ tcph->dest = otcph.source;
+
+ if (otcph.ack) {
+ needs_ack = 0;
+ tcph->seq = otcph.ack_seq;
+ tcph->ack_seq = 0;
+ } else {
+ needs_ack = 1;
+ tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
+ + otcplen - (otcph.doff<<2));
+ tcph->seq = 0;
+ }
+
+ /* Reset flags */
+ ((u_int8_t *)tcph)[13] = 0;
+ tcph->rst = 1;
+ tcph->ack = needs_ack;
+ tcph->window = 0;
+ tcph->urg_ptr = 0;
+ tcph->check = 0;
+
+ /* Adjust TCP checksum */
+ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
+ &ipv6_hdr(nskb)->daddr,
+ sizeof(struct tcphdr), IPPROTO_TCP,
+ csum_partial(tcph,
+ sizeof(struct tcphdr), 0));
+
+ nf_ct_attach(nskb, oldskb);
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip6_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ nskb->protocol = htons(ETH_P_IPV6);
+ ip6h->payload_len = htons(sizeof(struct tcphdr));
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ return;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip6_local_out(nskb);
+}
+
+#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index caf17db87db..37252f71a38 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -14,10 +14,9 @@
#include <linux/netfilter/nf_conntrack_common.h>
-#ifdef __KERNEL__
#include <linux/bitops.h>
#include <linux/compiler.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
@@ -40,23 +39,6 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
-/* Add protocol helper include file here */
-#include <linux/netfilter/nf_conntrack_ftp.h>
-#include <linux/netfilter/nf_conntrack_pptp.h>
-#include <linux/netfilter/nf_conntrack_h323.h>
-#include <linux/netfilter/nf_conntrack_sane.h>
-#include <linux/netfilter/nf_conntrack_sip.h>
-
-/* per conntrack: application helper private data */
-union nf_conntrack_help {
- /* insert conntrack helper private data (master) here */
- struct nf_ct_ftp_master ct_ftp_info;
- struct nf_ct_pptp_master ct_pptp_info;
- struct nf_ct_h323_master ct_h323_info;
- struct nf_ct_sane_master ct_sane_info;
- struct nf_ct_sip_master ct_sip_info;
-};
-
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
@@ -77,12 +59,13 @@ struct nf_conn_help {
/* Helper. if any */
struct nf_conntrack_helper __rcu *helper;
- union nf_conntrack_help help;
-
struct hlist_head expectations;
/* Current number of expected connections */
u8 expecting[NF_CT_MAX_EXPECT_CLASSES];
+
+ /* private helper information. */
+ char data[];
};
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
@@ -90,10 +73,17 @@ struct nf_conn_help {
struct nf_conn {
/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
- plus 1 for any connection(s) we are `master' for */
+ * plus 1 for any connection(s) we are `master' for
+ *
+ * Hint: skbs address this struct and its refcnt via skb->nfct and the
+ * helpers nf_conntrack_get() and nf_conntrack_put().
+ * nf_ct_put() is equivalent to nf_conntrack_put() (it decrements the
+ * refcnt); beware that nf_ct_get() does not take a reference.
+ */
struct nf_conntrack ct_general;
- spinlock_t lock;
+ spinlock_t lock;
+ u16 cpu;
/* XXX should I move this to the tail ? - Y.K */
/* These are my tuples; original and reply */
@@ -116,14 +106,14 @@ struct nf_conn {
u_int32_t secmark;
#endif
- /* Storage reserved for other modules: */
- union nf_conntrack_proto proto;
-
/* Extensions */
struct nf_ct_ext *ext;
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
+
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
};
static inline struct nf_conn *
@@ -156,15 +146,13 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct)
}
/* Alter reply tuple (maybe alter helper). */
-extern void
-nf_conntrack_alter_reply(struct nf_conn *ct,
- const struct nf_conntrack_tuple *newreply);
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply);
/* Is this tuple taken? (ignoring any belonging to the given
conntrack). */
-extern int
-nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
- const struct nf_conn *ignored_conntrack);
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
/* Return conntrack_info and tuple hash for given skb. */
static inline struct nf_conn *
@@ -182,38 +170,34 @@ static inline void nf_ct_put(struct nf_conn *ct)
}
/* Protocol module loading */
-extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-extern void nf_ct_l3proto_module_put(unsigned short l3proto);
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
/*
* Allocate a hashtable of hlist_head (if nulls == 0),
* or hlist_nulls_head (if nulls == 1)
*/
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+void nf_ct_free_hashtable(void *hash, unsigned int size);
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
-extern void nf_conntrack_hash_insert(struct nf_conn *ct);
-extern void nf_ct_delete_from_lists(struct nf_conn *ct);
-extern void nf_ct_insert_dying_list(struct nf_conn *ct);
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
-extern void nf_conntrack_flush_report(struct net *net, u32 pid, int report);
+void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
-extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
- unsigned int nhoff, u_int16_t l3num,
- struct nf_conntrack_tuple *tuple);
-extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+ u_int16_t l3num, struct nf_conntrack_tuple *tuple);
+bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig);
-extern void __nf_ct_refresh_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- unsigned long extra_jiffies,
- int do_acct);
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ unsigned long extra_jiffies, int do_acct);
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -232,10 +216,8 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}
-extern bool __nf_ct_kill_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- int do_acct);
+bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb, int do_acct);
/* kill conntrack and do accounting */
static inline bool nf_ct_kill_acct(struct nf_conn *ct,
@@ -252,7 +234,7 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
}
/* These are for NAT. Icky. */
-extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
@@ -262,17 +244,17 @@ static inline struct nf_conn *nf_ct_untracked_get(void)
{
return &__raw_get_cpu_var(nf_conntrack_untracked);
}
-extern void nf_ct_untracked_status_or(unsigned long bits);
+void nf_ct_untracked_status_or(unsigned long bits);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
-extern void
-nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
-extern void nf_conntrack_free(struct nf_conn *ct);
-extern struct nf_conn *
-nf_conntrack_alloc(struct net *net, u16 zone,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_tuple *repl,
- gfp_t gfp);
+void nf_ct_iterate_cleanup(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl,
+ gfp_t gfp);
static inline int nf_ct_is_template(const struct nf_conn *ct)
{
@@ -295,21 +277,26 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}
-extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+/* Packet is received from loopback */
+static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
+{
+ return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
+}
+
+struct kernel_param;
+
+int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
+extern unsigned int nf_conntrack_hash_rnd;
+void init_nf_conntrack_hash_rnd(void);
+
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
-#define NF_CT_STAT_INC(net, count) \
- __this_cpu_inc((net)->ct.stat->count)
-#define NF_CT_STAT_INC_ATOMIC(net, count) \
-do { \
- local_bh_disable(); \
- __this_cpu_inc((net)->ct.stat->count); \
- local_bh_enable(); \
-} while (0)
+#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define MODULE_ALIAS_NFCT_HELPER(helper) \
MODULE_ALIAS("nfct-helper-" helper)
-#endif /* __KERNEL__ */
#endif /* _NF_CONNTRACK_H */
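
A minimal caller-side sketch of the reworked iteration API above; kill_all() and flush_conntrack() are illustrative names, not part of the header, and the zero portid/report arguments stand in for "no netlink originator".

    /* Sketch: delete every conntrack entry in a netns using the new
     * (iter, data, portid, report) signature of nf_ct_iterate_cleanup(). */
    #include <net/netfilter/nf_conntrack.h>

    static int kill_all(struct nf_conn *ct, void *data)
    {
            return 1;       /* returning true deletes the entry */
    }

    static void flush_conntrack(struct net *net)
    {
            nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
    }
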
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 4e9c63a20db..79d8d16732b 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -15,21 +15,25 @@
#include <net/netfilter/nf_conntrack_extend.h>
struct nf_conn_counter {
- u_int64_t packets;
- u_int64_t bytes;
+ atomic64_t packets;
+ atomic64_t bytes;
+};
+
+struct nf_conn_acct {
+ struct nf_conn_counter counter[IP_CT_DIR_MAX];
};
static inline
-struct nf_conn_counter *nf_conn_acct_find(const struct nf_conn *ct)
+struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct)
{
return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
}
static inline
-struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
+struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
{
struct net *net = nf_ct_net(ct);
- struct nf_conn_counter *acct;
+ struct nf_conn_acct *acct;
if (!net->ct.sysctl_acct)
return NULL;
@@ -42,8 +46,8 @@ struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
return acct;
};
-extern unsigned int
-seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
+unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
+ int dir);
/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
@@ -57,7 +61,9 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
net->ct.sysctl_acct = enable;
}
-extern int nf_conntrack_acct_init(struct net *net);
-extern void nf_conntrack_acct_fini(struct net *net);
+int nf_conntrack_acct_pernet_init(struct net *net);
+void nf_conntrack_acct_pernet_fini(struct net *net);
+int nf_conntrack_acct_init(void);
+void nf_conntrack_acct_fini(void);
#endif /* _NF_CONNTRACK_ACCT_H */
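
Readers of the accounting extension follow the structural change above: the counters now live behind struct nf_conn_acct and are atomic64_t. A hedged sketch (ct_orig_bytes() is an illustrative helper, not part of the header):

    #include <net/netfilter/nf_conntrack_acct.h>

    static u64 ct_orig_bytes(const struct nf_conn *ct)
    {
            struct nf_conn_acct *acct = nf_conn_acct_find(ct);

            if (!acct)      /* extension may be absent */
                    return 0;
            return atomic64_read(&acct->counter[IP_CT_DIR_ORIGINAL].bytes);
    }
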
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index aced085132e..cc0c1882760 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -20,39 +20,42 @@
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */
-extern unsigned int nf_conntrack_in(struct net *net,
- u_int8_t pf,
- unsigned int hooknum,
- struct sk_buff *skb);
-
-extern int nf_conntrack_init(struct net *net);
-extern void nf_conntrack_cleanup(struct net *net);
-
-extern int nf_conntrack_proto_init(void);
-extern void nf_conntrack_proto_fini(void);
-
-extern bool
-nf_ct_get_tuple(const struct sk_buff *skb,
- unsigned int nhoff,
- unsigned int dataoff,
- u_int16_t l3num,
- u_int8_t protonum,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
-
-extern bool
-nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
+unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
+ struct sk_buff *skb);
+
+int nf_conntrack_init_net(struct net *net);
+void nf_conntrack_cleanup_net(struct net *net);
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
+
+int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_fini(struct net *net);
+
+int nf_conntrack_proto_init(void);
+void nf_conntrack_proto_fini(void);
+
+int nf_conntrack_init_start(void);
+void nf_conntrack_cleanup_start(void);
+
+void nf_conntrack_init_end(void);
+void nf_conntrack_cleanup_end(void);
+
+bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
+ unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_l4proto *l4proto);
+
+bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_l4proto *l4proto);
/* Find a connection corresponding to a tuple. */
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
-extern int __nf_conntrack_confirm(struct sk_buff *skb);
+int __nf_conntrack_confirm(struct sk_buff *skb);
/* Confirm a connection: returns NF_DROP if packet must be dropped. */
static inline int nf_conntrack_confirm(struct sk_buff *skb)
@@ -74,6 +77,13 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto);
-extern spinlock_t nf_conntrack_lock ;
+#ifdef CONFIG_LOCKDEP
+# define CONNTRACK_LOCKS 8
+#else
+# define CONNTRACK_LOCKS 1024
+#endif
+extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+
+extern spinlock_t nf_conntrack_expect_lock;
#endif /* _NF_CONNTRACK_CORE_H */
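
The single nf_conntrack_lock is gone; a sketch of how a caller might serialise on one hash bucket with the new lock array (lock_bucket()/unlock_bucket() and the modulo choice are assumptions of this example, not mandated by the header):

    #include <linux/spinlock.h>
    #include <net/netfilter/nf_conntrack_core.h>

    static void lock_bucket(unsigned int hash)
    {
            spin_lock(&nf_conntrack_locks[hash % CONNTRACK_LOCKS]);
    }

    static void unlock_bucket(unsigned int hash)
    {
            spin_unlock(&nf_conntrack_locks[hash % CONNTRACK_LOCKS]);
    }
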
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 96ba5f7dcab..0e3d08e4b1d 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -17,18 +17,24 @@ struct nf_conntrack_ecache {
unsigned long missed; /* missed events */
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
- u32 pid; /* netlink pid of destroyer */
+ u32 portid; /* netlink portid of destroyer */
+ struct timer_list timeout;
};
static inline struct nf_conntrack_ecache *
nf_ct_ecache_find(const struct nf_conn *ct)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+ return NULL;
+#endif
}
static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
@@ -45,13 +51,16 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
e->expmask = expmask;
}
return e;
+#else
+ return NULL;
+#endif
};
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* This structure is passed to event handler */
struct nf_ct_event {
struct nf_conn *ct;
- u32 pid;
+ u32 portid;
int report;
};
@@ -59,42 +68,42 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+int nf_conntrack_register_notifier(struct net *net,
+ struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net,
+ struct nf_ct_event_notifier *nb);
-extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
+void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (nf_conntrack_event_cb == NULL)
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
e = nf_ct_ecache_find(ct);
if (e == NULL)
return;
- if (!(e->ctmask & (1 << event)))
- return;
-
set_bit(event, &e->cache);
}
static inline int
nf_conntrack_eventmask_report(unsigned int eventmask,
struct nf_conn *ct,
- u32 pid,
+ u32 portid,
int report)
{
int ret = 0;
+ struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -105,11 +114,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
struct nf_ct_event item = {
.ct = ct,
- .pid = e->pid ? e->pid : pid,
+ .portid = e->portid ? e->portid : portid,
.report = report
};
/* This is a resent of a destroy event? If so, skip missed */
- unsigned long missed = e->pid ? 0 : e->missed;
+ unsigned long missed = e->portid ? 0 : e->missed;
if (!((eventmask | missed) & e->ctmask))
goto out_unlock;
@@ -119,11 +128,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
spin_lock_bh(&ct->lock);
if (ret < 0) {
/* This is a destroy event that has been
- * triggered by a process, we store the PID
+ * triggered by a process, we store the PORTID
* to include it in the retransmission. */
if (eventmask & (1 << IPCT_DESTROY) &&
- e->pid == 0 && pid != 0)
- e->pid = pid;
+ e->portid == 0 && portid != 0)
+ e->portid = portid;
else
e->missed |= eventmask;
} else
@@ -138,9 +147,9 @@ out_unlock:
static inline int
nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
- u32 pid, int report)
+ u32 portid, int report)
{
- return nf_conntrack_eventmask_report(1 << event, ct, pid, report);
+ return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
}
static inline int
@@ -151,7 +160,7 @@ nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
struct nf_exp_event {
struct nf_conntrack_expect *exp;
- u32 pid;
+ u32 portid;
int report;
};
@@ -159,21 +168,23 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+int nf_ct_expect_register_notifier(struct net *net,
+ struct nf_exp_event_notifier *nb);
+void nf_ct_expect_unregister_notifier(struct net *net,
+ struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
- u32 pid,
+ u32 portid,
int report)
{
+ struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference(net->ct.nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -184,7 +195,7 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
if (e->expmask & (1 << event)) {
struct nf_exp_event item = {
.exp = exp,
- .pid = pid,
+ .portid = portid,
.report = report
};
notify->fcn(1 << event, &item);
@@ -200,37 +211,48 @@ nf_ct_expect_event(enum ip_conntrack_expect_events event,
nf_ct_expect_event_report(event, exp, 0, 0);
}
-extern int nf_conntrack_ecache_init(struct net *net);
-extern void nf_conntrack_ecache_fini(struct net *net);
+int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_fini(struct net *net);
+int nf_conntrack_ecache_init(void);
+void nf_conntrack_ecache_fini(void);
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
struct nf_conn *ct) {}
static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
struct nf_conn *ct,
- u32 pid,
+ u32 portid,
int report) { return 0; }
static inline int nf_conntrack_event(enum ip_conntrack_events event,
struct nf_conn *ct) { return 0; }
static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
struct nf_conn *ct,
- u32 pid,
+ u32 portid,
int report) { return 0; }
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp) {}
static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
struct nf_conntrack_expect *exp,
- u32 pid,
+ u32 portid,
int report) {}
-static inline int nf_conntrack_ecache_init(struct net *net)
+static inline int nf_conntrack_ecache_pernet_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
+{
+}
+
+static inline int nf_conntrack_ecache_init(void)
{
return 0;
}
-static inline void nf_conntrack_ecache_fini(struct net *net)
+static inline void nf_conntrack_ecache_fini(void)
{
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
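
Event notifiers are now registered per network namespace. A sketch of a consumer adapting to the new signatures (my_event_cb and attach_notifier are illustrative names):

    #include <net/netfilter/nf_conntrack_ecache.h>

    static int my_event_cb(unsigned int events, struct nf_ct_event *item)
    {
            /* item->portid replaces the old item->pid field */
            return 0;
    }

    static struct nf_ct_event_notifier my_notifier = {
            .fcn    = my_event_cb,
    };

    static int attach_notifier(struct net *net)
    {
            return nf_conntrack_register_notifier(net, &my_notifier);
    }
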
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 0f8a8c58753..3f3aecbc863 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -43,7 +43,7 @@ struct nf_conntrack_expect {
unsigned int class;
#ifdef CONFIG_NF_NAT_NEEDED
- __be32 saved_ip;
+ union nf_inet_addr saved_addr;
/* This is the original per-proto part, used to map the
* expected connection the way the recipient expects. */
union nf_conntrack_man_proto saved_proto;
@@ -59,16 +59,21 @@ static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
return nf_ct_net(exp->master);
}
+#define NF_CT_EXP_POLICY_NAME_LEN 16
+
struct nf_conntrack_expect_policy {
unsigned int max_expected;
unsigned int timeout;
- const char *name;
+ char name[NF_CT_EXP_POLICY_NAME_LEN];
};
#define NF_CT_EXPECT_CLASS_DEFAULT 0
-int nf_conntrack_expect_init(struct net *net);
-void nf_conntrack_expect_fini(struct net *net);
+int nf_conntrack_expect_pernet_init(struct net *net);
+void nf_conntrack_expect_pernet_fini(struct net *net);
+
+int nf_conntrack_expect_init(void);
+void nf_conntrack_expect_fini(void);
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
@@ -83,7 +88,7 @@ nf_ct_find_expectation(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
- u32 pid, int report);
+ u32 portid, int report);
static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
nf_ct_unlink_expect_report(exp, 0, 0);
@@ -91,7 +96,6 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
void nf_ct_remove_expectations(struct nf_conn *ct);
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
-void nf_ct_remove_userspace_expectations(void);
/* Allocate space for an expectation: this is mandatory before calling
nf_ct_expect_related. You will have to call put afterwards. */
@@ -102,7 +106,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
u_int8_t, const __be16 *, const __be16 *);
void nf_ct_expect_put(struct nf_conntrack_expect *exp);
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
- u32 pid, int report);
+ u32 portid, int report);
static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect)
{
return nf_ct_expect_related_report(expect, 0, 0);
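
Since the policy name is now an embedded array rather than a pointer, helpers initialise it in place; a sketch with illustrative values:

    #include <net/netfilter/nf_conntrack_expect.h>

    static const struct nf_conntrack_expect_policy my_exp_policy = {
            .max_expected   = 1,
            .timeout        = 5 * 60,
            .name           = "my-helper",  /* must fit NF_CT_EXP_POLICY_NAME_LEN */
    };
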
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 0772d296dfd..55d15049ab2 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -7,24 +7,48 @@
enum nf_ct_ext_id {
NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
NF_CT_EXT_NAT,
+#endif
+ NF_CT_EXT_SEQADJ,
NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ NF_CT_EXT_TSTAMP,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ NF_CT_EXT_TIMEOUT,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ NF_CT_EXT_LABELS,
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ NF_CT_EXT_SYNPROXY,
+#endif
NF_CT_EXT_NUM,
};
#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
-#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
+#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
+#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
+#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
+#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
+#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
struct rcu_head rcu;
- u8 offset[NF_CT_EXT_NUM];
- u8 len;
+ u16 offset[NF_CT_EXT_NUM];
+ u16 len;
char data[0];
};
@@ -49,7 +73,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
/* Destroy all relationships */
-extern void __nf_ct_ext_destroy(struct nf_conn *ct);
+void __nf_ct_ext_destroy(struct nf_conn *ct);
static inline void nf_ct_ext_destroy(struct nf_conn *ct)
{
if (ct->ext)
@@ -62,14 +86,17 @@ static inline void nf_ct_ext_destroy(struct nf_conn *ct)
static inline void nf_ct_ext_free(struct nf_conn *ct)
{
if (ct->ext)
- kfree(ct->ext);
+ kfree_rcu(ct->ext, rcu);
}
/* Add this type, returns pointer to data or NULL. */
-void *
-__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
+void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+ size_t var_alloc_len, gfp_t gfp);
+
#define nf_ct_ext_add(ct, id, gfp) \
- ((id##_TYPE *)__nf_ct_ext_add((ct), (id), (gfp)))
+ ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), 0, (gfp)))
+#define nf_ct_ext_add_length(ct, id, len, gfp) \
+ ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), (len), (gfp)))
#define NF_CT_EXT_F_PREALLOC 0x0001
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c305dbdab..6cf614bc002 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -11,18 +11,27 @@
#define _NF_CONNTRACK_HELPER_H
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_expect.h>
struct module;
+enum nf_ct_helper_flags {
+ NF_CT_HELPER_F_USERSPACE = (1 << 0),
+ NF_CT_HELPER_F_CONFIGURED = (1 << 1),
+};
+
#define NF_CT_HELPER_NAME_LEN 16
struct nf_conntrack_helper {
struct hlist_node hnode; /* Internal use. */
- const char *name; /* name of the module */
+ char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */
struct module *me; /* pointer to self */
const struct nf_conntrack_expect_policy *expect_policy;
+	/* length of internal data, i.e. sizeof(struct nf_ct_*_master) */
+ size_t data_len;
+
/* Tuple of things we will help (compared against server response) */
struct nf_conntrack_tuple tuple;
@@ -35,32 +44,76 @@ struct nf_conntrack_helper {
void (*destroy)(struct nf_conn *ct);
+ int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct);
int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
unsigned int expect_class_max;
+
+ unsigned int flags;
+ unsigned int queue_num; /* For user-space helpers. */
};
-extern struct nf_conntrack_helper *
-__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
+ u16 l3num, u8 protonum);
-extern struct nf_conntrack_helper *
-nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
+ u16 l3num,
+ u8 protonum);
-extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
-extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
-extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ gfp_t gfp);
-extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
- gfp_t flags);
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+ gfp_t flags);
-extern void nf_ct_helper_destroy(struct nf_conn *ct);
+void nf_ct_helper_destroy(struct nf_conn *ct);
static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
{
return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
}
-extern int nf_conntrack_helper_init(void);
-extern void nf_conntrack_helper_fini(void);
+static inline void *nfct_help_data(const struct nf_conn *ct)
+{
+ struct nf_conn_help *help;
+
+ help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+
+ return (void *)help->data;
+}
+
+int nf_conntrack_helper_pernet_init(struct net *net);
+void nf_conntrack_helper_pernet_fini(struct net *net);
+
+int nf_conntrack_helper_init(void);
+void nf_conntrack_helper_fini(void);
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout);
+
+struct nf_ct_helper_expectfn {
+ struct list_head head;
+ const char *name;
+ void (*expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp);
+};
+
+__printf(3,4)
+void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
+ const char *fmt, ...);
+
+void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n);
+void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n);
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_name(const char *name);
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
+
+extern struct hlist_head *nf_ct_helper_hash;
+extern unsigned int nf_ct_helper_hsize;
#endif /*_NF_CONNTRACK_HELPER_H*/
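
The helper extension is now sized from the helper's data_len, so callers pass the helper when adding it. A sketch (attach_helper() is an illustrative wrapper):

    #include <net/netfilter/nf_conntrack_helper.h>

    static struct nf_conn_help *attach_helper(struct nf_conn *ct,
                                              struct nf_conntrack_helper *helper)
    {
            struct nf_conn_help *help;

            help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
            if (help)
                    pr_debug("helper private data at %p\n", nfct_help_data(ct));
            return help;
    }
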
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index a7547611e8f..adc1fa3dd7a 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -64,22 +64,29 @@ struct nf_conntrack_l3proto {
size_t nla_size;
#ifdef CONFIG_SYSCTL
- struct ctl_table_header *ctl_table_header;
- struct ctl_path *ctl_table_path;
- struct ctl_table *ctl_table;
+ const char *ctl_table_path;
#endif /* CONFIG_SYSCTL */
+ /* Init l3proto pernet data */
+ int (*init_net)(struct net *net);
+
/* Module (if any) which this is connected to. */
struct module *me;
};
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
+
+/* Protocol pernet registration. */
+int nf_ct_l3proto_pernet_register(struct net *net,
+ struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_pernet_unregister(struct net *net,
+ struct nf_conntrack_l3proto *proto);
+
+/* Protocol global registration. */
+int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
-/* Protocol registration. */
-extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
-extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
+struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
/* Existing built-in protocols */
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index e3d3ee3c06a..4c8d573830b 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -12,6 +12,7 @@
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/netns/generic.h>
struct seq_file;
@@ -39,12 +40,13 @@ struct nf_conntrack_l4proto {
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum);
+ unsigned int hooknum,
+ unsigned int *timeouts);
/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff);
+ unsigned int dataoff, unsigned int *timeouts);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -60,6 +62,9 @@ struct nf_conntrack_l4proto {
/* Print out the private part of the conntrack. */
int (*print_conntrack)(struct seq_file *s, struct nf_conn *);
+ /* Return the array of timeouts for this protocol. */
+ unsigned int *(*get_timeouts)(struct net *net);
+
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
@@ -79,15 +84,24 @@ struct nf_conntrack_l4proto {
size_t nla_size;
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header **ctl_table_header;
- struct ctl_table *ctl_table;
- unsigned int *ctl_table_users;
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- struct ctl_table_header *ctl_compat_table_header;
- struct ctl_table *ctl_compat_table;
-#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ struct {
+ size_t obj_size;
+ int (*nlattr_to_obj)(struct nlattr *tb[],
+ struct net *net, void *data);
+ int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
+
+ unsigned int nlattr_max;
+ const struct nla_policy *nla_policy;
+ } ctnl_timeout;
#endif
+ int *net_id;
+ /* Init l4proto pernet data */
+ int (*init_net)(struct net *net, u_int16_t proto);
+
+ /* Return the per-net protocol part. */
+ struct nf_proto_net *(*get_net_proto)(struct net *net);
+
/* Protocol name */
const char *name;
@@ -100,33 +114,44 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO 256
-extern struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
+struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
+ u_int8_t l4proto);
+
+struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
+ u_int8_t l4proto);
+void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
-/* Protocol registration. */
-extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+/* Protocol pernet registration. */
+int nf_ct_l4proto_pernet_register(struct net *net,
+ struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_pernet_unregister(struct net *net,
+ struct nf_conntrack_l4proto *proto);
+
+/* Protocol global registration. */
+int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+
+static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
+{
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ kfree(pn->ctl_compat_table);
+ pn->ctl_compat_table = NULL;
+#endif
+}
/* Generic netlink helpers */
-extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
- const struct nf_conntrack_tuple *tuple);
-extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
-extern int nf_ct_port_nlattr_tuple_size(void);
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *tuple);
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t);
+int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-#ifdef DEBUG_INVALID_PACKETS
#define LOG_INVALID(net, proto) \
((net)->ct.sysctl_log_invalid == (proto) || \
(net)->ct.sysctl_log_invalid == IPPROTO_RAW)
#else
-#define LOG_INVALID(net, proto) \
- (((net)->ct.sysctl_log_invalid == (proto) || \
- (net)->ct.sysctl_log_invalid == IPPROTO_RAW) \
- && net_ratelimit())
-#endif
-#else
static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
#endif /* CONFIG_SYSCTL */
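
Registration is now split into a global half and a pernet half. A sketch of how a protocol module might wire both up (my_l4proto and the wrapper names are illustrative; error unwinding is omitted):

    #include <net/netfilter/nf_conntrack_l4proto.h>

    static int my_proto_global_init(struct nf_conntrack_l4proto *my_l4proto)
    {
            return nf_ct_l4proto_register(my_l4proto);
    }

    static int my_proto_pernet_init(struct net *net,
                                    struct nf_conntrack_l4proto *my_l4proto)
    {
            return nf_ct_l4proto_pernet_register(net, my_l4proto);
    }
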
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
new file mode 100644
index 00000000000..dec6336bf85
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -0,0 +1,60 @@
+#include <linux/types.h>
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+#include <uapi/linux/netfilter/xt_connlabel.h>
+
+#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
+
+struct nf_conn_labels {
+ u8 words;
+ unsigned long bits[];
+};
+
+static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ return nf_ct_ext_find(ct, NF_CT_EXT_LABELS);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ struct nf_conn_labels *cl_ext;
+ struct net *net = nf_ct_net(ct);
+ u8 words;
+
+ words = ACCESS_ONCE(net->ct.label_words);
+ if (words == 0)
+ return NULL;
+
+ cl_ext = nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS,
+ words * sizeof(long), GFP_ATOMIC);
+ if (cl_ext != NULL)
+ cl_ext->words = words;
+
+ return cl_ext;
+#else
+ return NULL;
+#endif
+}
+
+bool nf_connlabel_match(const struct nf_conn *ct, u16 bit);
+int nf_connlabel_set(struct nf_conn *ct, u16 bit);
+
+int nf_connlabels_replace(struct nf_conn *ct,
+ const u32 *data, const u32 *mask, unsigned int words);
+
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+int nf_conntrack_labels_init(void);
+void nf_conntrack_labels_fini(void);
+#else
+static inline int nf_conntrack_labels_init(void) { return 0; }
+static inline void nf_conntrack_labels_fini(void) {}
+#endif
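
A sketch of a consumer of the new label extension; bit 3 and tag_connection() are illustrative, and both calls are declared above:

    #include <net/netfilter/nf_conntrack_labels.h>

    static void tag_connection(struct nf_conn *ct)
    {
            if (nf_ct_labels_ext_add(ct) == NULL)
                    return;         /* labels disabled or allocation failed */

            if (nf_connlabel_set(ct, 3) == 0 && nf_connlabel_match(ct, 3))
                    pr_debug("label 3 set on conntrack\n");
    }
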
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
new file mode 100644
index 00000000000..4b3362991a2
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -0,0 +1,47 @@
+#ifndef _NF_CONNTRACK_SEQADJ_H
+#define _NF_CONNTRACK_SEQADJ_H
+
+#include <net/netfilter/nf_conntrack_extend.h>
+
+/**
+ * struct nf_ct_seqadj - sequence number adjustment information
+ *
+ * @correction_pos: position of the last TCP sequence number modification
+ * @offset_before: sequence number offset before last modification
+ * @offset_after: sequence number offset after last modification
+ */
+struct nf_ct_seqadj {
+ u32 correction_pos;
+ s32 offset_before;
+ s32 offset_after;
+};
+
+struct nf_conn_seqadj {
+ struct nf_ct_seqadj seq[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct)
+{
+ return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ);
+}
+
+static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
+{
+ return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
+}
+
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off);
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off);
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, s32 off);
+
+int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, unsigned int protoff);
+s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
+
+int nf_conntrack_seqadj_init(void);
+void nf_conntrack_seqadj_fini(void);
+
+#endif /* _NF_CONNTRACK_SEQADJ_H */
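
A sketch of the typical caller: a helper that grows or shrinks TCP payload records the byte delta through the seqadj extension (note_payload_delta() and off are illustrative):

    #include <net/netfilter/nf_conntrack_seqadj.h>

    static void note_payload_delta(struct sk_buff *skb, struct nf_conn *ct,
                                   enum ip_conntrack_info ctinfo, s32 off)
    {
            if (off != 0)
                    nf_ct_tcp_seqadj_set(skb, ct, ctinfo, off);
    }
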
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
new file mode 100644
index 00000000000..6793614e650
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -0,0 +1,75 @@
+#ifndef _NF_CONNTRACK_SYNPROXY_H
+#define _NF_CONNTRACK_SYNPROXY_H
+
+#include <net/netns/generic.h>
+
+struct nf_conn_synproxy {
+ u32 isn;
+ u32 its;
+ u32 tsoff;
+};
+
+static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC);
+#else
+ return NULL;
+#endif
+}
+
+struct synproxy_stats {
+ unsigned int syn_received;
+ unsigned int cookie_invalid;
+ unsigned int cookie_valid;
+ unsigned int cookie_retrans;
+ unsigned int conn_reopened;
+};
+
+struct synproxy_net {
+ struct nf_conn *tmpl;
+ struct synproxy_stats __percpu *stats;
+};
+
+extern int synproxy_net_id;
+static inline struct synproxy_net *synproxy_pernet(struct net *net)
+{
+ return net_generic(net, synproxy_net_id);
+}
+
+struct synproxy_options {
+ u8 options;
+ u8 wscale;
+ u16 mss;
+ u32 tsval;
+ u32 tsecr;
+};
+
+struct tcphdr;
+struct xt_synproxy_info;
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th,
+ struct synproxy_options *opts);
+unsigned int synproxy_options_size(const struct synproxy_options *opts);
+void synproxy_build_options(struct tcphdr *th,
+ const struct synproxy_options *opts);
+
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+ struct synproxy_options *opts);
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
+ struct tcphdr *th, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_conn_synproxy *synproxy);
+
+#endif /* _NF_CONNTRACK_SYNPROXY_H */
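
A sketch of option parsing with the declarations above; thoff and th are assumed to come from the caller's own header parsing:

    #include <linux/string.h>
    #include <net/netfilter/nf_conntrack_synproxy.h>

    static bool read_syn_options(const struct sk_buff *skb, unsigned int thoff,
                                 const struct tcphdr *th,
                                 struct synproxy_options *opts)
    {
            memset(opts, 0, sizeof(*opts));
            return synproxy_parse_options(skb, thoff, th, opts);
    }
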
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
new file mode 100644
index 00000000000..62308713dd7
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -0,0 +1,98 @@
+#ifndef _NF_CONNTRACK_TIMEOUT_H
+#define _NF_CONNTRACK_TIMEOUT_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+#define CTNL_TIMEOUT_NAME_MAX 32
+
+struct ctnl_timeout {
+ struct list_head head;
+ struct rcu_head rcu_head;
+ atomic_t refcnt;
+ char name[CTNL_TIMEOUT_NAME_MAX];
+ __u16 l3num;
+ struct nf_conntrack_l4proto *l4proto;
+ char data[0];
+};
+
+struct nf_conn_timeout {
+ struct ctnl_timeout *timeout;
+};
+
+#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data)
+
+static inline
+struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
+ struct ctnl_timeout *timeout,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
+ if (timeout_ext == NULL)
+ return NULL;
+
+ timeout_ext->timeout = timeout;
+
+ return timeout_ext;
+#else
+ return NULL;
+#endif
+};
+
+static inline unsigned int *
+nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+ unsigned int *timeouts;
+
+ timeout_ext = nf_ct_timeout_find(ct);
+ if (timeout_ext)
+ timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
+ else
+ timeouts = l4proto->get_timeouts(net);
+
+ return timeouts;
+#else
+ return l4proto->get_timeouts(net);
+#endif
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+int nf_conntrack_timeout_init(void);
+void nf_conntrack_timeout_fini(void);
+#else
+static inline int nf_conntrack_timeout_init(void)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_timeout_fini(void)
+{
+ return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(const char *name);
+extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
+#endif
+
+#endif /* _NF_CONNTRACK_TIMEOUT_H */
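
A sketch of attaching a timeout policy to a conntrack template; the packet path later resolves it (or the protocol defaults) through nf_ct_timeout_lookup(). The GFP flag and attach_timeout_policy() are illustrative:

    #include <net/netfilter/nf_conntrack_timeout.h>

    static bool attach_timeout_policy(struct nf_conn *tmpl,
                                      struct ctnl_timeout *timeout)
    {
            return nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC) != NULL;
    }
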
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644
index 00000000000..300ae2209f2
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -0,0 +1,78 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+ u_int64_t start;
+ u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ struct net *net = nf_ct_net(ct);
+
+ if (!net->ct.sysctl_tstamp)
+ return NULL;
+
+ return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+ return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+ return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+ net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+int nf_conntrack_tstamp_pernet_init(struct net *net);
+void nf_conntrack_tstamp_pernet_fini(struct net *net);
+
+int nf_conntrack_tstamp_init(void);
+void nf_conntrack_tstamp_fini(void);
+#else
+static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_tstamp_pernet_fini(struct net *net)
+{
+ return;
+}
+
+static inline int nf_conntrack_tstamp_init(void)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(void)
+{
+ return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
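
A sketch of stamping the connection start time when the extension is present; using ktime_get_real() as the clock source is an assumption of this example:

    #include <linux/ktime.h>
    #include <net/netfilter/nf_conntrack_timestamp.h>

    static void stamp_start(struct nf_conn *ct)
    {
            struct nf_conn_tstamp *tstamp = nf_conn_tstamp_find(ct);

            if (tstamp)
                    tstamp->start = ktime_to_ns(ktime_get_real());
    }
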
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 4ee44c84a30..aea3f8221be 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -24,32 +24,6 @@
#define NF_CT_TUPLE_L3SIZE ARRAY_SIZE(((union nf_inet_addr *)NULL)->all)
-/* The protocol-specific manipulable parts of the tuple: always in
- network order! */
-union nf_conntrack_man_proto {
- /* Add other protocols here. */
- __be16 all;
-
- struct {
- __be16 port;
- } tcp;
- struct {
- __be16 port;
- } udp;
- struct {
- __be16 id;
- } icmp;
- struct {
- __be16 port;
- } dccp;
- struct {
- __be16 port;
- } sctp;
- struct {
- __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */
- } gre;
-};
-
/* The manipulable part of the tuple. */
struct nf_conntrack_man {
union nf_inet_addr u3;
@@ -104,8 +78,6 @@ struct nf_conntrack_tuple_mask {
} src;
};
-#ifdef __KERNEL__
-
static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t)
{
#ifdef DEBUG
@@ -148,8 +120,6 @@ struct nf_conntrack_tuple_hash {
struct nf_conntrack_tuple tuple;
};
-#endif /* __KERNEL__ */
-
static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 920997f1aff..99eac12d040 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -30,7 +30,8 @@ struct nf_loginfo {
} u;
};
-typedef void nf_logfn(u_int8_t pf,
+typedef void nf_logfn(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
@@ -49,16 +50,23 @@ struct nf_logger {
int nf_log_register(u_int8_t pf, struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
-int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger);
-void nf_log_unbind_pf(u_int8_t pf);
+void nf_log_set(struct net *net, u_int8_t pf,
+ const struct nf_logger *logger);
+void nf_log_unset(struct net *net, const struct nf_logger *logger);
+
+int nf_log_bind_pf(struct net *net, u_int8_t pf,
+ const struct nf_logger *logger);
+void nf_log_unbind_pf(struct net *net, u_int8_t pf);
/* Calls the registered backend logging function */
-void nf_log_packet(u_int8_t pf,
+__printf(8, 9)
+void nf_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *li,
- const char *fmt, ...) __attribute__ ((format(printf,7,8)));
+ const char *fmt, ...);
#endif /* _NF_LOG_H */
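
A sketch of a caller adapting to the namespace-aware logging call; the prefix string and log_drop() are illustrative:

    #include <net/netfilter/nf_log.h>

    static void log_drop(struct net *net, u8 pf, unsigned int hooknum,
                         const struct sk_buff *skb,
                         const struct net_device *in,
                         const struct net_device *out)
    {
            nf_log_packet(net, pf, hooknum, skb, in, out, NULL,
                          "dropped by example rule\n");
    }
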
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index f5f09f032a9..a71dd333ac6 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,54 +1,18 @@
#ifndef _NF_NAT_H
#define _NF_NAT_H
#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nf_nat.h>
#include <net/netfilter/nf_conntrack_tuple.h>
-#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
-
enum nf_nat_manip_type {
- IP_NAT_MANIP_SRC,
- IP_NAT_MANIP_DST
+ NF_NAT_MANIP_SRC,
+ NF_NAT_MANIP_DST
};
/* SRC manip occurs POST_ROUTING or LOCAL_IN */
#define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \
(hooknum) != NF_INET_LOCAL_IN)
-#define IP_NAT_RANGE_MAP_IPS 1
-#define IP_NAT_RANGE_PROTO_SPECIFIED 2
-#define IP_NAT_RANGE_PROTO_RANDOM 4
-#define IP_NAT_RANGE_PERSISTENT 8
-
-/* NAT sequence number modifications */
-struct nf_nat_seq {
- /* position of the last TCP sequence number modification (if any) */
- u_int32_t correction_pos;
-
- /* sequence number offset before and after last modification */
- int16_t offset_before, offset_after;
-};
-
-/* Single range specification. */
-struct nf_nat_range {
- /* Set to OR of flags above. */
- unsigned int flags;
-
- /* Inclusive: network order. */
- __be32 min_ip, max_ip;
-
- /* Inclusive: network order */
- union nf_conntrack_man_proto min, max;
-};
-
-/* For backwards compat: don't use in modern code. */
-struct nf_nat_multi_range_compat {
- unsigned int rangesize; /* Must be 1. */
-
- /* hangs off end. */
- struct nf_nat_range range[1];
-};
-
-#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#include <net/netfilter/nf_conntrack_extend.h>
@@ -56,7 +20,9 @@ struct nf_nat_multi_range_compat {
/* per conntrack: nat application helper private data */
union nf_conntrack_nat_help {
/* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
struct nf_nat_pptp nat_pptp_info;
+#endif
};
struct nf_conn;
@@ -64,30 +30,52 @@ struct nf_conn;
/* The structure embedded in the conntrack structure. */
struct nf_conn_nat {
struct hlist_node bysource;
- struct nf_nat_seq seq[IP_CT_DIR_MAX];
struct nf_conn *ct;
union nf_conntrack_nat_help help;
#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
+ defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
+ defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
+ defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
int masq_index;
#endif
};
/* Set up the info structure to map into this range. */
-extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
- const struct nf_nat_range *range,
- enum nf_nat_manip_type maniptype);
+unsigned int nf_nat_setup_info(struct nf_conn *ct,
+ const struct nf_nat_range *range,
+ enum nf_nat_manip_type maniptype);
+
+extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+ unsigned int hooknum);
+
+struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
/* Is this tuple already taken? (not by us)*/
-extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
- const struct nf_conn *ignored_conntrack);
+int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+ return NULL;
+#endif
+}
+
+static inline bool nf_nat_oif_changed(unsigned int hooknum,
+ enum ip_conntrack_info ctinfo,
+ struct nf_conn_nat *nat,
+ const struct net_device *out)
+{
+#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \
+ IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE)
+ return nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
+ CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
+ nat->masq_index != out->ifindex;
+#else
+ return false;
+#endif
}
-#else /* !__KERNEL__: iptables wants this to compile. */
-#define nf_nat_multi_range nf_nat_multi_range_compat
-#endif /*__KERNEL__*/
#endif
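
A sketch of the post-routing check the new nf_nat_oif_changed() helper enables; binding_stale() is an illustrative wrapper around the two inlines declared above:

    #include <net/netfilter/nf_nat.h>

    static bool binding_stale(unsigned int hooknum, enum ip_conntrack_info ctinfo,
                              struct nf_conn *ct, const struct net_device *out)
    {
            struct nf_conn_nat *nat = nfct_nat(ct);

            return nat && nf_nat_oif_changed(hooknum, ctinfo, nat, out);
    }
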
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 33602ab6619..fbfd1ba4254 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -7,23 +7,18 @@
/* This header used to share core functionality between the standalone
NAT module, and the compatibility layer's use of NAT for masquerading. */
-extern unsigned int nf_nat_packet(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum,
- struct sk_buff *skb);
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, struct sk_buff *skb);
-extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum,
- struct sk_buff *skb);
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
static inline int nf_nat_initialized(struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
- if (manip == IP_NAT_MANIP_SRC)
- return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ if (manip == NF_NAT_MANIP_SRC)
+ return ct->status & IPS_SRC_NAT_DONE;
else
- return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_DST_NAT_DONE;
}
struct nlattr;
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 02bb6c29dc3..01bcc6bfbcc 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -7,51 +7,34 @@
struct sk_buff;
/* These return true or false. */
-extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int match_offset,
- unsigned int match_len,
- const char *rep_buffer,
- unsigned int rep_len, bool adjust);
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len, bool adjust);
static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len)
{
- return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+ return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
match_offset, match_len,
rep_buffer, rep_len, true);
}
-extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int match_offset,
- unsigned int match_len,
- const char *rep_buffer,
- unsigned int rep_len);
-
-extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off);
-extern int nf_nat_seq_adjust(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo);
-extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo);
+int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len);
/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
-extern void nf_nat_follow_master(struct nf_conn *ct,
- struct nf_conntrack_expect *this);
+void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
-extern s16 nf_nat_get_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
#endif
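
Callers of the mangling helpers now pass the transport header offset explicitly. A sketch (rewrite_payload() is illustrative; protoff would come from the caller's own header parsing):

    #include <net/netfilter/nf_nat_helper.h>

    static int rewrite_payload(struct sk_buff *skb, struct nf_conn *ct,
                               enum ip_conntrack_info ctinfo, unsigned int protoff,
                               unsigned int off, unsigned int len,
                               const char *rep, unsigned int rep_len)
    {
            return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                            off, len, rep, rep_len);
    }
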
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
new file mode 100644
index 00000000000..5a2919b2e09
--- /dev/null
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -0,0 +1,49 @@
+#ifndef _NF_NAT_L3PROTO_H
+#define _NF_NAT_L3PROTO_H
+
+struct nf_nat_l4proto;
+struct nf_nat_l3proto {
+ u8 l3proto;
+
+ bool (*in_range)(const struct nf_conntrack_tuple *t,
+ const struct nf_nat_range *range);
+
+ u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
+
+ bool (*manip_pkt)(struct sk_buff *skb,
+ unsigned int iphdroff,
+ const struct nf_nat_l4proto *l4proto,
+ const struct nf_conntrack_tuple *target,
+ enum nf_nat_manip_type maniptype);
+
+ void (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
+ __sum16 *check,
+ const struct nf_conntrack_tuple *t,
+ enum nf_nat_manip_type maniptype);
+
+ void (*csum_recalc)(struct sk_buff *skb, u8 proto,
+ void *data, __sum16 *check,
+ int datalen, int oldlen);
+
+ void (*decode_session)(struct sk_buff *skb,
+ const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ unsigned long statusbit,
+ struct flowi *fl);
+
+ int (*nlattr_to_range)(struct nlattr *tb[],
+ struct nf_nat_range *range);
+};
+
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum);
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, unsigned int hdrlen);
+
+#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
new file mode 100644
index 00000000000..12f4cc841b6
--- /dev/null
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -0,0 +1,72 @@
+/* Header for use in defining a given protocol. */
+#ifndef _NF_NAT_L4PROTO_H
+#define _NF_NAT_L4PROTO_H
+#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+struct nf_nat_range;
+struct nf_nat_l3proto;
+
+struct nf_nat_l4proto {
+ /* Protocol number. */
+ u8 l4proto;
+
+ /* Translate a packet to the target according to manip type.
+ * Return true if succeeded.
+ */
+ bool (*manip_pkt)(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype);
+
+ /* Is the manipable part of the tuple between min and max incl? */
+ bool (*in_range)(const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype,
+ const union nf_conntrack_man_proto *min,
+ const union nf_conntrack_man_proto *max);
+
+ /* Alter the per-proto part of the tuple (depending on
+ * maniptype), to give a unique tuple in the given range if
+ * possible. Per-protocol part of tuple is initialized to the
+ * incoming packet.
+ */
+ void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_nat_range *range,
+ enum nf_nat_manip_type maniptype,
+ const struct nf_conn *ct);
+
+ int (*nlattr_to_range)(struct nlattr *tb[],
+ struct nf_nat_range *range);
+};
+
+/* Protocol registration. */
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+void nf_nat_l4proto_unregister(u8 l3proto,
+ const struct nf_nat_l4proto *l4proto);
+
+const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+
+/* Built-in protocols. */
+extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
+extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
+
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype,
+ const union nf_conntrack_man_proto *min,
+ const union nf_conntrack_man_proto *max);
+
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_nat_range *range,
+ enum nf_nat_manip_type maniptype,
+ const struct nf_conn *ct, u16 *rover);
+
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+ struct nf_nat_range *range);
+
+#endif /*_NF_NAT_L4PROTO_H*/
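
A sketch of registering a NAT protocol under the split (l3proto, l4proto) API; my_l4proto is an illustrative instance and NFPROTO_IPV4 is just one possible family:

    #include <net/netfilter/nf_nat_l4proto.h>

    static int attach_my_nat_proto(const struct nf_nat_l4proto *my_l4proto)
    {
            return nf_nat_l4proto_register(NFPROTO_IPV4, my_l4proto);
    }

    static void detach_my_nat_proto(const struct nf_nat_l4proto *my_l4proto)
    {
            nf_nat_l4proto_unregister(NFPROTO_IPV4, my_l4proto);
    }
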
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
deleted file mode 100644
index 93cc90d28e6..00000000000
--- a/include/net/netfilter/nf_nat_protocol.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Header for use in defining a given protocol. */
-#ifndef _NF_NAT_PROTOCOL_H
-#define _NF_NAT_PROTOCOL_H
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-struct nf_nat_range;
-
-struct nf_nat_protocol {
- /* Protocol number. */
- unsigned int protonum;
-
- struct module *me;
-
- /* Translate a packet to the target according to manip type.
- Return true if succeeded. */
- bool (*manip_pkt)(struct sk_buff *skb,
- unsigned int iphdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype);
-
- /* Is the manipable part of the tuple between min and max incl? */
- bool (*in_range)(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
- /* Alter the per-proto part of the tuple (depending on
- maniptype), to give a unique tuple in the given range if
- possible. Per-protocol part of tuple is initialized to the
- incoming packet. */
- void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct);
-
- int (*range_to_nlattr)(struct sk_buff *skb,
- const struct nf_nat_range *range);
-
- int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range *range);
-};
-
-/* Protocol registration. */
-extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto);
-extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto);
-
-/* Built-in protocols. */
-extern const struct nf_nat_protocol nf_nat_protocol_tcp;
-extern const struct nf_nat_protocol nf_nat_protocol_udp;
-extern const struct nf_nat_protocol nf_nat_protocol_icmp;
-extern const struct nf_nat_protocol nf_nat_unknown_protocol;
-
-extern int init_protocols(void) __init;
-extern void cleanup_protocols(void);
-extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);
-
-extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
-extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct,
- u_int16_t *rover);
-
-extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
- const struct nf_nat_range *range);
-extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range *range);
-
-#endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h
deleted file mode 100644
index 2890bdc4cd9..00000000000
--- a/include/net/netfilter/nf_nat_rule.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _NF_NAT_RULE_H
-#define _NF_NAT_RULE_H
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-extern int nf_nat_rule_init(void) __init;
-extern void nf_nat_rule_cleanup(void);
-extern int nf_nat_rule_find(struct sk_buff *skb,
- unsigned int hooknum,
- const struct net_device *in,
- const struct net_device *out,
- struct nf_conn *ct);
-
-#endif /* _NF_NAT_RULE_H */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 252fd1010b7..84a53d78030 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -1,6 +1,10 @@
#ifndef _NF_QUEUE_H
#define _NF_QUEUE_H
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+
/* Each queued (to userspace) skbuff has one of these. */
struct nf_queue_entry {
struct list_head list;
@@ -9,10 +13,13 @@ struct nf_queue_entry {
struct nf_hook_ops *elem;
u_int8_t pf;
+ u16 size; /* sizeof(entry) + saved route keys */
unsigned int hook;
struct net_device *indev;
struct net_device *outdev;
int (*okfn)(struct sk_buff *);
+
+ /* extra space to store route keys */
};
#define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
@@ -21,14 +28,71 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
- char *name;
};
-extern int nf_register_queue_handler(u_int8_t pf,
- const struct nf_queue_handler *qh);
-extern int nf_unregister_queue_handler(u_int8_t pf,
- const struct nf_queue_handler *qh);
-extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
-extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_register_queue_handler(const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(void);
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+
+static inline void init_hashrandom(u32 *jhash_initval)
+{
+ while (*jhash_initval == 0)
+ *jhash_initval = prandom_u32();
+}
+
+static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+
+ /* packets in either direction go into same queue */
+ if ((__force u32)iph->saddr < (__force u32)iph->daddr)
+ return jhash_3words((__force u32)iph->saddr,
+ (__force u32)iph->daddr, iph->protocol, jhash_initval);
+
+ return jhash_3words((__force u32)iph->daddr,
+ (__force u32)iph->saddr, iph->protocol, jhash_initval);
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ u32 a, b, c;
+
+ if ((__force u32)ip6h->saddr.s6_addr32[3] <
+ (__force u32)ip6h->daddr.s6_addr32[3]) {
+ a = (__force u32) ip6h->saddr.s6_addr32[3];
+ b = (__force u32) ip6h->daddr.s6_addr32[3];
+ } else {
+ b = (__force u32) ip6h->saddr.s6_addr32[3];
+ a = (__force u32) ip6h->daddr.s6_addr32[3];
+ }
+
+ if ((__force u32)ip6h->saddr.s6_addr32[1] <
+ (__force u32)ip6h->daddr.s6_addr32[1])
+ c = (__force u32) ip6h->saddr.s6_addr32[1];
+ else
+ c = (__force u32) ip6h->daddr.s6_addr32[1];
+
+ return jhash_3words(a, b, c, jhash_initval);
+}
+#endif
+
+static inline u32
+nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
+ u32 jhash_initval)
+{
+ if (family == NFPROTO_IPV4)
+ queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ else if (family == NFPROTO_IPV6)
+ queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
+#endif
+
+ return queue;
+}
#endif /* _NF_QUEUE_H */
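A short sketch of how a queueing target could use the inline helpers above to spread flows across a block of queues; the function and variable names are hypothetical, not part of this patch.

static u32 example_jhash_initval __read_mostly;

static u32 example_pick_queue(const struct sk_buff *skb, u8 family)
{
	u16 first_queue = 0, queues_total = 4;

	/* lazily seed the hash; a no-op once the value is non-zero */
	init_hashrandom(&example_jhash_initval);

	/* both directions of a flow land in the same queue,
	 * somewhere in [first_queue, first_queue + queues_total)
	 */
	return nfqueue_hash(skb, first_queue, queues_total, family,
			    example_jhash_initval);
}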
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
new file mode 100644
index 00000000000..c4d86198d3d
--- /dev/null
+++ b/include/net/netfilter/nf_tables.h
@@ -0,0 +1,655 @@
+#ifndef _NET_NF_TABLES_H
+#define _NET_NF_TABLES_H
+
+#include <linux/list.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
+#include <net/netlink.h>
+
+#define NFT_JUMP_STACK_SIZE 16
+
+struct nft_pktinfo {
+ struct sk_buff *skb;
+ const struct net_device *in;
+ const struct net_device *out;
+ const struct nf_hook_ops *ops;
+ u8 nhoff;
+ u8 thoff;
+ u8 tprot;
+ /* for x_tables compatibility */
+ struct xt_action_param xt;
+};
+
+static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ pkt->skb = skb;
+ pkt->in = pkt->xt.in = in;
+ pkt->out = pkt->xt.out = out;
+ pkt->ops = ops;
+ pkt->xt.hooknum = ops->hooknum;
+ pkt->xt.family = ops->pf;
+}
+
+struct nft_data {
+ union {
+ u32 data[4];
+ struct {
+ u32 verdict;
+ struct nft_chain *chain;
+ };
+ };
+} __attribute__((aligned(__alignof__(u64))));
+
+static inline int nft_data_cmp(const struct nft_data *d1,
+ const struct nft_data *d2,
+ unsigned int len)
+{
+ return memcmp(d1->data, d2->data, len);
+}
+
+static inline void nft_data_copy(struct nft_data *dst,
+ const struct nft_data *src)
+{
+ BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
+ *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
+ *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+}
+
+static inline void nft_data_debug(const struct nft_data *data)
+{
+ pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
+ data->data[0], data->data[1],
+ data->data[2], data->data[3]);
+}
+
+/**
+ * struct nft_ctx - nf_tables rule/set context
+ *
+ * @net: net namespace
+ * @afi: address family info
+ * @table: the table the chain is contained in
+ * @chain: the chain the rule is contained in
+ * @nla: netlink attributes
+ * @portid: netlink portID of the original message
+ * @seq: netlink sequence number
+ * @report: notify via unicast netlink message
+ */
+struct nft_ctx {
+ struct net *net;
+ struct nft_af_info *afi;
+ struct nft_table *table;
+ struct nft_chain *chain;
+ const struct nlattr * const *nla;
+ u32 portid;
+ u32 seq;
+ bool report;
+};
+
+struct nft_data_desc {
+ enum nft_data_types type;
+ unsigned int len;
+};
+
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+ struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
+
+static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+{
+ return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
+static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+{
+ return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+}
+
+int nft_validate_input_register(enum nft_registers reg);
+int nft_validate_output_register(enum nft_registers reg);
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type);
+
+/**
+ * struct nft_set_elem - generic representation of set elements
+ *
+ * @cookie: implementation specific element cookie
+ * @key: element key
+ * @data: element data (maps only)
+ * @flags: element flags (end of interval)
+ *
+ * The cookie can be used to store a handle to the element for subsequent
+ * removal.
+ */
+struct nft_set_elem {
+ void *cookie;
+ struct nft_data key;
+ struct nft_data data;
+ u32 flags;
+};
+
+struct nft_set;
+struct nft_set_iter {
+ unsigned int count;
+ unsigned int skip;
+ int err;
+ int (*fn)(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_set_iter *iter,
+ const struct nft_set_elem *elem);
+};
+
+/**
+ * struct nft_set_desc - description of set elements
+ *
+ * @klen: key length
+ * @dlen: data length
+ * @size: number of set elements
+ */
+struct nft_set_desc {
+ unsigned int klen;
+ unsigned int dlen;
+ unsigned int size;
+};
+
+/**
+ * enum nft_set_class - performance class
+ *
+ * @NFT_LOOKUP_O_1: constant, O(1)
+ * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
+ * @NFT_LOOKUP_O_N: linear, O(N)
+ */
+enum nft_set_class {
+ NFT_SET_CLASS_O_1,
+ NFT_SET_CLASS_O_LOG_N,
+ NFT_SET_CLASS_O_N,
+};
+
+/**
+ * struct nft_set_estimate - estimation of memory and performance
+ * characteristics
+ *
+ * @size: required memory
+ * @class: lookup performance class
+ */
+struct nft_set_estimate {
+ unsigned int size;
+ enum nft_set_class class;
+};
+
+/**
+ * struct nft_set_ops - nf_tables set operations
+ *
+ * @lookup: look up an element within the set
+ * @get: get an element from the set
+ * @insert: insert new element into set
+ * @remove: remove element from set
+ * @walk: iterate over all set elements
+ * @privsize: function to return size of set private data
+ * @estimate: estimate the memory size and lookup complexity of the set
+ * @init: initialize private data of new set instance
+ * @destroy: destroy private data of set instance
+ * @list: nf_tables_set_ops list node
+ * @owner: module reference
+ * @features: features supported by the implementation
+ */
+struct nft_set_ops {
+ bool (*lookup)(const struct nft_set *set,
+ const struct nft_data *key,
+ struct nft_data *data);
+ int (*get)(const struct nft_set *set,
+ struct nft_set_elem *elem);
+ int (*insert)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void (*remove)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void (*walk)(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ struct nft_set_iter *iter);
+
+ unsigned int (*privsize)(const struct nlattr * const nla[]);
+ bool (*estimate)(const struct nft_set_desc *desc,
+ u32 features,
+ struct nft_set_estimate *est);
+ int (*init)(const struct nft_set *set,
+ const struct nft_set_desc *desc,
+ const struct nlattr * const nla[]);
+ void (*destroy)(const struct nft_set *set);
+
+ struct list_head list;
+ struct module *owner;
+ u32 features;
+};
+
+int nft_register_set(struct nft_set_ops *ops);
+void nft_unregister_set(struct nft_set_ops *ops);
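A skeletal sketch of how a set backend might register its ops from module init. The example_* callbacks are hypothetical and their bodies are omitted; only the prototypes (matching nft_set_ops above) are shown.

static bool example_lookup(const struct nft_set *set,
			   const struct nft_data *key, struct nft_data *data);
static int example_insert(const struct nft_set *set,
			  const struct nft_set_elem *elem);
static void example_remove(const struct nft_set *set,
			   const struct nft_set_elem *elem);
static void example_walk(const struct nft_ctx *ctx, const struct nft_set *set,
			 struct nft_set_iter *iter);
static unsigned int example_privsize(const struct nlattr * const nla[]);
static int example_init(const struct nft_set *set,
			const struct nft_set_desc *desc,
			const struct nlattr * const nla[]);
static void example_destroy(const struct nft_set *set);

static struct nft_set_ops example_set_ops __read_mostly = {
	.lookup		= example_lookup,
	.insert		= example_insert,
	.remove		= example_remove,
	.walk		= example_walk,
	.privsize	= example_privsize,
	.init		= example_init,
	.destroy	= example_destroy,
	.owner		= THIS_MODULE,
};

static int __init example_set_module_init(void)
{
	return nft_register_set(&example_set_ops);
}

static void __exit example_set_module_exit(void)
{
	nft_unregister_set(&example_set_ops);
}
module_init(example_set_module_init);
module_exit(example_set_module_exit);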
+
+/**
+ * struct nft_set - nf_tables set instance
+ *
+ * @list: table set list node
+ * @bindings: list of set bindings
+ * @name: name of the set
+ * @ktype: key type (numeric type defined by userspace, not used in the kernel)
+ * @dtype: data type (verdict or numeric type defined by userspace)
+ * @size: maximum set size
+ * @nelems: number of elements
+ * @ops: set ops
+ * @flags: set flags
+ * @klen: key length
+ * @dlen: data length
+ * @data: private set data
+ */
+struct nft_set {
+ struct list_head list;
+ struct list_head bindings;
+ char name[IFNAMSIZ];
+ u32 ktype;
+ u32 dtype;
+ u32 size;
+ u32 nelems;
+ /* runtime data below here */
+ const struct nft_set_ops *ops ____cacheline_aligned;
+ u16 flags;
+ u8 klen;
+ u8 dlen;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_set_priv(const struct nft_set *set)
+{
+ return (void *)set->data;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+ const struct nlattr *nla);
+struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+ const struct nlattr *nla);
+
+/**
+ * struct nft_set_binding - nf_tables set binding
+ *
+ * @list: set bindings list node
+ * @chain: chain containing the rule bound to the set
+ *
+ * A set binding contains all information necessary for validation
+ * of new elements added to a bound set.
+ */
+struct nft_set_binding {
+ struct list_head list;
+ const struct nft_chain *chain;
+};
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+
+
+/**
+ * struct nft_expr_type - nf_tables expression type
+ *
+ * @select_ops: function to select nft_expr_ops
+ * @ops: default ops, used when no select_ops function is present
+ * @list: used internally
+ * @name: Identifier
+ * @owner: module reference
+ * @policy: netlink attribute policy
+ * @maxattr: highest netlink attribute number
+ * @family: address family for AF-specific types
+ */
+struct nft_expr_type {
+ const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
+ const struct nlattr * const tb[]);
+ const struct nft_expr_ops *ops;
+ struct list_head list;
+ const char *name;
+ struct module *owner;
+ const struct nla_policy *policy;
+ unsigned int maxattr;
+ u8 family;
+};
+
+/**
+ * struct nft_expr_ops - nf_tables expression operations
+ *
+ * @eval: Expression evaluation function
+ * @size: full expression size, including private data size
+ * @init: initialization function
+ * @destroy: destruction function
+ * @dump: function to dump parameters
+ * @type: expression type
+ * @validate: validate expression, called during loop detection
+ * @data: extra data to attach to this expression operation
+ */
+struct nft_expr;
+struct nft_expr_ops {
+ void (*eval)(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+ unsigned int size;
+
+ int (*init)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+ void (*destroy)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ int (*dump)(struct sk_buff *skb,
+ const struct nft_expr *expr);
+ int (*validate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+ const struct nft_expr_type *type;
+ void *data;
+};
+
+#define NFT_EXPR_MAXATTR 16
+#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
+ ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ * struct nft_expr - nf_tables expression
+ *
+ * @ops: expression ops
+ * @data: expression private data
+ */
+struct nft_expr {
+ const struct nft_expr_ops *ops;
+ unsigned char data[];
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+ return (void *)expr->data;
+}
+
+/**
+ * struct nft_rule - nf_tables rule
+ *
+ * @list: used internally
+ * @handle: rule handle
+ * @genmask: generation mask
+ * @dlen: length of expression data
+ * @ulen: length of user data (used for comments)
+ * @data: expression data
+ */
+struct nft_rule {
+ struct list_head list;
+ u64 handle:42,
+ genmask:2,
+ dlen:12,
+ ulen:8;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+/**
+ * struct nft_trans - nf_tables object update in transaction
+ *
+ * @rcu_head: rcu head to defer release of transaction data
+ * @list: used internally
+ * @msg_type: message type
+ * @ctx: transaction context
+ * @data: internal information related to the transaction
+ */
+struct nft_trans {
+ struct rcu_head rcu_head;
+ struct list_head list;
+ int msg_type;
+ struct nft_ctx ctx;
+ char data[0];
+};
+
+struct nft_trans_rule {
+ struct nft_rule *rule;
+};
+
+#define nft_trans_rule(trans) \
+ (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
+};
+
+#define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+ bool update;
+ char name[NFT_CHAIN_MAXNAMELEN];
+ struct nft_stats __percpu *stats;
+ u8 policy;
+};
+
+#define nft_trans_chain_update(trans) \
+ (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans) \
+ (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans) \
+ (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans) \
+ (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+ bool update;
+ bool enable;
+};
+
+#define nft_trans_table_update(trans) \
+ (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans) \
+ (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
+};
+
+#define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
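A minimal sketch of the layout these macros rely on: the type-specific payload lives in trans->data, directly after struct nft_trans, and is reached through the casts above. The allocation helper shown here is assumed for illustration; the real one in nf_tables_api.c may differ.

static struct nft_trans *example_trans_alloc(const struct nft_ctx *ctx,
					     int msg_type, size_t size)
{
	struct nft_trans *trans;

	/* one allocation covers the header plus the per-type payload */
	trans = kzalloc(sizeof(*trans) + size, GFP_KERNEL);
	if (trans == NULL)
		return NULL;

	trans->msg_type = msg_type;
	trans->ctx = *ctx;
	return trans;
}

/* e.g. queueing a new rule in the transaction log:
 *	trans = example_trans_alloc(ctx, NFT_MSG_NEWRULE,
 *				    sizeof(struct nft_trans_rule));
 *	nft_trans_rule(trans) = rule;
 */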
+
+static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
+{
+ return (struct nft_expr *)&rule->data[0];
+}
+
+static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr)
+{
+ return ((void *)expr) + expr->ops->size;
+}
+
+static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+{
+ return (struct nft_expr *)&rule->data[rule->dlen];
+}
+
+static inline void *nft_userdata(const struct nft_rule *rule)
+{
+ return (void *)&rule->data[rule->dlen];
+}
+
+/*
+ * The last pointer isn't really necessary, but the compiler isn't able to
+ * determine that the result of nft_expr_last() is always the same since it
+ * can't assume that the dlen value wasn't changed within calls in the loop.
+ */
+#define nft_rule_for_each_expr(expr, last, rule) \
+ for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \
+ (expr) != (last); \
+ (expr) = nft_expr_next(expr))
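A minimal sketch of the iteration pattern the macro enables (assumed caller, not from this patch): walk every expression packed into a rule's data area and evaluate it against the current packet.

static void example_eval_rule(const struct nft_rule *rule,
			      struct nft_data data[NFT_REG_MAX + 1],
			      const struct nft_pktinfo *pkt)
{
	const struct nft_expr *expr, *last;

	/* expressions are laid out back to back; nft_expr_next() steps
	 * by each expression's ops->size
	 */
	nft_rule_for_each_expr(expr, last, rule)
		expr->ops->eval(expr, data, pkt);
}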
+
+enum nft_chain_flags {
+ NFT_BASE_CHAIN = 0x1,
+ NFT_CHAIN_INACTIVE = 0x2,
+};
+
+/**
+ * struct nft_chain - nf_tables chain
+ *
+ * @rules: list of rules in the chain
+ * @list: used internally
+ * @net: net namespace that this chain belongs to
+ * @table: table that this chain belongs to
+ * @handle: chain handle
+ * @use: number of jump references to this chain
+ * @level: length of longest path to this chain
+ * @flags: bitmask of enum nft_chain_flags
+ * @name: name of the chain
+ */
+struct nft_chain {
+ struct list_head rules;
+ struct list_head list;
+ struct net *net;
+ struct nft_table *table;
+ u64 handle;
+ u32 use;
+ u16 level;
+ u8 flags;
+ char name[NFT_CHAIN_MAXNAMELEN];
+};
+
+enum nft_chain_type {
+ NFT_CHAIN_T_DEFAULT = 0,
+ NFT_CHAIN_T_ROUTE,
+ NFT_CHAIN_T_NAT,
+ NFT_CHAIN_T_MAX
+};
+
+struct nft_stats {
+ u64 bytes;
+ u64 pkts;
+ struct u64_stats_sync syncp;
+};
+
+#define NFT_HOOK_OPS_MAX 2
+
+/**
+ * struct nft_base_chain - nf_tables base chain
+ *
+ * @ops: netfilter hook ops
+ * @type: chain type
+ * @policy: default policy
+ * @stats: per-cpu chain stats
+ * @chain: the chain
+ */
+struct nft_base_chain {
+ struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
+ const struct nf_chain_type *type;
+ u8 policy;
+ struct nft_stats __percpu *stats;
+ struct nft_chain chain;
+};
+
+static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+{
+ return container_of(chain, struct nft_base_chain, chain);
+}
+
+unsigned int nft_do_chain(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops);
+
+/**
+ * struct nft_table - nf_tables table
+ *
+ * @list: used internally
+ * @chains: chains in the table
+ * @sets: sets in the table
+ * @hgenerator: handle generator state
+ * @use: number of chain references to this table
+ * @flags: table flags (see enum nft_table_flags)
+ * @name: name of the table
+ */
+struct nft_table {
+ struct list_head list;
+ struct list_head chains;
+ struct list_head sets;
+ u64 hgenerator;
+ u32 use;
+ u16 flags;
+ char name[];
+};
+
+/**
+ * struct nft_af_info - nf_tables address family info
+ *
+ * @list: used internally
+ * @family: address family
+ * @nhooks: number of hooks in this family
+ * @owner: module owner
+ * @tables: used internally
+ * @nops: number of hook ops in this family
+ * @hook_ops_init: initialization function for chain hook ops
+ * @hooks: hookfn overrides for packet validation
+ */
+struct nft_af_info {
+ struct list_head list;
+ int family;
+ unsigned int nhooks;
+ struct module *owner;
+ struct list_head tables;
+ unsigned int nops;
+ void (*hook_ops_init)(struct nf_hook_ops *,
+ unsigned int);
+ nf_hookfn *hooks[NF_MAX_HOOKS];
+};
+
+int nft_register_afinfo(struct net *, struct nft_af_info *);
+void nft_unregister_afinfo(struct nft_af_info *);
+
+/**
+ * struct nf_chain_type - nf_tables chain type info
+ *
+ * @name: name of the type
+ * @type: numeric identifier
+ * @family: address family
+ * @owner: module owner
+ * @hook_mask: mask of valid hooks
+ * @hooks: hookfn overrides
+ */
+struct nf_chain_type {
+ const char *name;
+ enum nft_chain_type type;
+ int family;
+ struct module *owner;
+ unsigned int hook_mask;
+ nf_hookfn *hooks[NF_MAX_HOOKS];
+};
+
+int nft_register_chain_type(const struct nf_chain_type *);
+void nft_unregister_chain_type(const struct nf_chain_type *);
+
+int nft_register_expr(struct nft_expr_type *);
+void nft_unregister_expr(struct nft_expr_type *);
+
+#define nft_dereference(p) \
+ nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
+
+#define MODULE_ALIAS_NFT_FAMILY(family) \
+ MODULE_ALIAS("nft-afinfo-" __stringify(family))
+
+#define MODULE_ALIAS_NFT_CHAIN(family, name) \
+ MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
+ MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_EXPR(name) \
+ MODULE_ALIAS("nft-expr-" name)
+
+#define MODULE_ALIAS_NFT_SET() \
+ MODULE_ALIAS("nft-set")
+
+#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
new file mode 100644
index 00000000000..a75fc8e27cd
--- /dev/null
+++ b/include/net/netfilter/nf_tables_core.h
@@ -0,0 +1,52 @@
+#ifndef _NET_NF_TABLES_CORE_H
+#define _NET_NF_TABLES_CORE_H
+
+int nf_tables_core_module_init(void);
+void nf_tables_core_module_exit(void);
+
+int nft_immediate_module_init(void);
+void nft_immediate_module_exit(void);
+
+struct nft_cmp_fast_expr {
+ u32 data;
+ enum nft_registers sreg:8;
+ u8 len;
+};
+
+/* Calculate the mask for the nft_cmp_fast expression. On big endian the
+ * mask needs to include the *upper* bytes when interpreting that data as
+ * something smaller than the full u32, therefore a cpu_to_le32 is done.
+ */
+static inline u32 nft_cmp_fast_mask(unsigned int len)
+{
+ return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
+ data) * BITS_PER_BYTE - len));
+}
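A worked instance of the mask computation; the values follow directly from the expression above.

/*
 * len = 16:
 *   ~0U >> (32 - 16)        == 0x0000ffff
 *   cpu_to_le32(0x0000ffff) == 0x0000ffff on little endian (low two bytes),
 *                              0xffff0000 on big endian (upper two bytes),
 * i.e. exactly where a 16 bit value sits within the u32 on each side.
 */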
+
+extern const struct nft_expr_ops nft_cmp_fast_ops;
+
+int nft_cmp_module_init(void);
+void nft_cmp_module_exit(void);
+
+int nft_lookup_module_init(void);
+void nft_lookup_module_exit(void);
+
+int nft_bitwise_module_init(void);
+void nft_bitwise_module_exit(void);
+
+int nft_byteorder_module_init(void);
+void nft_byteorder_module_exit(void);
+
+struct nft_payload {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+ enum nft_registers dreg:8;
+};
+
+extern const struct nft_expr_ops nft_payload_fast_ops;
+
+int nft_payload_module_init(void);
+void nft_payload_module_exit(void);
+
+#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
new file mode 100644
index 00000000000..cba143fbd2e
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -0,0 +1,26 @@
+#ifndef _NF_TABLES_IPV4_H_
+#define _NF_TABLES_IPV4_H_
+
+#include <net/netfilter/nf_tables.h>
+#include <net/ip.h>
+
+static inline void
+nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ struct iphdr *ip;
+
+ nft_set_pktinfo(pkt, ops, skb, in, out);
+
+ ip = ip_hdr(pkt->skb);
+ pkt->tprot = ip->protocol;
+ pkt->xt.thoff = ip_hdrlen(pkt->skb);
+ pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+}
+
+extern struct nft_af_info nft_af_ipv4;
+
+#endif
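A sketch of how an IPv4 base chain hook might use the helper above together with nft_do_chain(); the hook prototype is assumed for this kernel generation and is not part of the patch.

static unsigned int
example_nft_ipv4_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
		      const struct net_device *in,
		      const struct net_device *out,
		      int (*okfn)(struct sk_buff *))
{
	struct nft_pktinfo pkt;

	/* fill in generic and IPv4-specific packet metadata */
	nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
	return nft_do_chain(&pkt, ops);
}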
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
new file mode 100644
index 00000000000..74d97613765
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -0,0 +1,33 @@
+#ifndef _NF_TABLES_IPV6_H_
+#define _NF_TABLES_IPV6_H_
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ipv6.h>
+
+static inline int
+nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ int protohdr, thoff = 0;
+ unsigned short frag_off;
+
+ nft_set_pktinfo(pkt, ops, skb, in, out);
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ /* If malformed, drop it */
+ if (protohdr < 0)
+ return -1;
+
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+
+ return 0;
+}
+
+extern struct nft_af_info nft_af_ipv6;
+
+#endif
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
deleted file mode 100644
index cd85b3bc832..00000000000
--- a/include/net/netfilter/nf_tproxy_core.h
+++ /dev/null
@@ -1,218 +0,0 @@
-#ifndef _NF_TPROXY_CORE_H
-#define _NF_TPROXY_CORE_H
-
-#include <linux/types.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
-#include <net/tcp.h>
-
-#define NFT_LOOKUP_ANY 0
-#define NFT_LOOKUP_LISTENER 1
-#define NFT_LOOKUP_ESTABLISHED 2
-
-/* look up and get a reference to a matching socket */
-
-
-/* This function is used by the 'TPROXY' target and the 'socket'
- * match. The following lookups are supported:
- *
- * Explicit TProxy target rule
- * ===========================
- *
- * This is used when the user wants to intercept a connection matching
- * an explicit iptables rule. In this case the sockets are assumed
- * matching in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple, it is returned, assuming the redirection
- * already took place and we process a packet belonging to an
- * established connection
- *
- * - match: if there's a listening socket matching the redirection
- * (e.g. on-port & on-ip of the connection), it is returned,
- * regardless if it was bound to 0.0.0.0 or an explicit
- * address. The reasoning is that if there's an explicit rule, it
- * does not really matter if the listener is bound to an interface
- * or to 0. The user already stated that he wants redirection
- * (since he added the rule).
- *
- * "socket" match based redirection (no specific rule)
- * ===================================================
- *
- * There are connections with dynamic endpoints (e.g. FTP data
- * connection) that the user is unable to add explicit rules
- * for. These are taken care of by a generic "socket" rule. It is
- * assumed that the proxy application is trusted to open such
- * connections without explicit iptables rule (except of course the
- * generic 'socket' rule). In this case the following sockets are
- * matched in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple
- *
- * - match: if there's a non-zero bound listener (possibly with a
- * non-local address) We don't accept zero-bound listeners, since
- * then local services could intercept traffic going through the
- * box.
- *
- * Please note that there's an overlap between what a TPROXY target
- * and a socket match will match. Normally if you have both rules the
- * "socket" match will be the first one, effectively all packets
- * belonging to established connections going through that one.
- */
-static inline struct sock *
-nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
- const __be32 saddr, const __be32 daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = __inet_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet_lookup_listener(net, &tcp_hashinfo,
- daddr, dport,
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = inet_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
- protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline struct sock *
-nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
- const struct in6_addr *saddr, const struct in6_addr *daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = inet6_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet6_lookup_listener(net, &tcp_hashinfo,
- daddr, ntohs(dport),
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = __inet6_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, ntohs(dport),
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
- protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-#endif
-
-static inline void
-nf_tproxy_put_sock(struct sock *sk)
-{
- /* TIME_WAIT inet sockets have to be handled differently */
- if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
- inet_twsk_put(inet_twsk(sk));
- else
- sock_put(sk);
-}
-
-/* assign a socket to the skb -- consumes sk */
-int
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
-
-#endif
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
index e2dec42c2db..5ca3f14f099 100644
--- a/include/net/netfilter/nfnetlink_log.h
+++ b/include/net/netfilter/nfnetlink_log.h
@@ -2,7 +2,8 @@
#define _KER_NFNETLINK_LOG_H
void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
new file mode 100644
index 00000000000..aff88ba9139
--- /dev/null
+++ b/include/net/netfilter/nfnetlink_queue.h
@@ -0,0 +1,51 @@
+#ifndef _NET_NFNL_QUEUE_H_
+#define _NET_NFNL_QUEUE_H_
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+struct nf_conn;
+
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
+ enum ip_conntrack_info *ctinfo);
+struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
+ const struct nlattr *attr,
+ enum ip_conntrack_info *ctinfo);
+int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int diff);
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report);
+#else
+static inline struct nf_conn *
+nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
+{
+ return NULL;
+}
+
+static inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
+ const struct nlattr *attr,
+ enum ip_conntrack_info *ctinfo)
+{
+ return NULL;
+}
+
+static inline int
+nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ return 0;
+}
+
+static inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int diff)
+{
+}
+
+static inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
+#endif
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
new file mode 100644
index 00000000000..0ee47c3e2e3
--- /dev/null
+++ b/include/net/netfilter/nft_meta.h
@@ -0,0 +1,36 @@
+#ifndef _NFT_META_H_
+#define _NFT_META_H_
+
+struct nft_meta {
+ enum nft_meta_keys key:8;
+ union {
+ enum nft_registers dreg:8;
+ enum nft_registers sreg:8;
+ };
+};
+
+extern const struct nla_policy nft_meta_policy[];
+
+int nft_meta_get_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_meta_set_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_meta_get_dump(struct sk_buff *skb,
+ const struct nft_expr *expr);
+
+int nft_meta_set_dump(struct sk_buff *skb,
+ const struct nft_expr *expr);
+
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+void nft_meta_set_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 00000000000..36b0da2d55b
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,25 @@
+#ifndef _NFT_REJECT_H_
+#define _NFT_REJECT_H_
+
+struct nft_reject {
+ enum nft_reject_types type:8;
+ u8 icmp_code;
+};
+
+extern const struct nla_policy nft_reject_policy[];
+
+int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+#endif
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h
index 0dfb34a5b53..9d9756cca01 100644
--- a/include/net/netfilter/xt_log.h
+++ b/include/net/netfilter/xt_log.h
@@ -6,7 +6,7 @@ struct sbuff {
};
static struct sbuff emergency, *emergency_ptr = &emergency;
-static int sb_add(struct sbuff *m, const char *f, ...)
+static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...)
{
va_list args;
int len;
@@ -47,7 +47,7 @@ static void sb_close(struct sbuff *m)
if (likely(m != &emergency))
kfree(m);
else {
- xchg(&emergency_ptr, m);
+ emergency_ptr = m;
local_bh_enable();
}
}
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 5a2978d1cb2..79f45e19f31 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -6,7 +6,7 @@ struct xt_rateest {
struct gnet_stats_basic_packed bstats;
spinlock_t lock;
/* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
- struct gnet_stats_rate_est rstats;
+ struct gnet_stats_rate_est64 rstats;
/* following fields not accessed in hot path */
struct hlist_node list;
@@ -16,7 +16,7 @@ struct xt_rateest {
struct rcu_head rcu;
};
-extern struct xt_rateest *xt_rateest_lookup(const char *name);
-extern void xt_rateest_put(struct xt_rateest *est);
+struct xt_rateest *xt_rateest_lookup(const char *name);
+void xt_rateest_put(struct xt_rateest *est);
#endif /* _XT_RATEEST_H */
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 9db401a8b4d..4fe018c48ed 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -4,7 +4,7 @@
* The NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -22,8 +22,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -38,7 +37,7 @@
#include <linux/in6.h>
#include <net/netlink.h>
#include <net/request_sock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct cipso_v4_doi;
@@ -110,8 +109,8 @@ struct cipso_v4_doi;
/* NetLabel audit information */
struct netlbl_audit {
u32 secid;
- uid_t loginuid;
- u32 sessionid;
+ kuid_t loginuid;
+ unsigned int sessionid;
};
/*
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 373f1a900cf..2b47eaadba8 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,24 +98,14 @@
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
* nla_put_u64(skb, type, value) add u64 attribute to skb
+ * nla_put_s8(skb, type, value) add s8 attribute to skb
+ * nla_put_s16(skb, type, value) add s16 attribute to skb
+ * nla_put_s32(skb, type, value) add s32 attribute to skb
+ * nla_put_s64(skb, type, value) add s64 attribute to skb
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
* nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
*
- * Exceptions Based Attribute Construction:
- * NLA_PUT(skb, type, len, data) add attribute to skb
- * NLA_PUT_U8(skb, type, value) add u8 attribute to skb
- * NLA_PUT_U16(skb, type, value) add u16 attribute to skb
- * NLA_PUT_U32(skb, type, value) add u32 attribute to skb
- * NLA_PUT_U64(skb, type, value) add u64 attribute to skb
- * NLA_PUT_STRING(skb, type, str) add string attribute to skb
- * NLA_PUT_FLAG(skb, type) add flag attribute to skb
- * NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb
- *
- * The meaning of these functions is equal to their lower case
- * variants but they jump to the label nla_put_failure in case
- * of a failure.
- *
* Nested Attributes Construction:
* nla_nest_start(skb, type) start a nested attribute
* nla_nest_end(skb, nla) finalize a nested attribute
@@ -135,6 +125,10 @@
* nla_get_u16(nla) get payload for a u16 attribute
* nla_get_u32(nla) get payload for a u32 attribute
* nla_get_u64(nla) get payload for a u64 attribute
+ * nla_get_s8(nla) get payload for a s8 attribute
+ * nla_get_s16(nla) get payload for a s16 attribute
+ * nla_get_s32(nla) get payload for a s32 attribute
+ * nla_get_s64(nla) get payload for a s64 attribute
* nla_get_flag(nla) return 1 if flag is true
* nla_get_msecs(nla) get payload for a msecs attribute
*
@@ -174,6 +168,10 @@ enum {
NLA_NESTED_COMPAT,
NLA_NUL_STRING,
NLA_BINARY,
+ NLA_S8,
+ NLA_S16,
+ NLA_S32,
+ NLA_S64,
__NLA_TYPE_MAX,
};
@@ -192,8 +190,17 @@ enum {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_NESTED_COMPAT Exact length of structure payload
- * All other Exact length of attribute payload
+ * NLA_NESTED Don't use `len' field -- length verification is
+ * done by checking len of nested header (or empty)
+ * NLA_NESTED_COMPAT Minimum length of structure payload
+ * NLA_U8, NLA_U16,
+ * NLA_U32, NLA_U64,
+ * NLA_S8, NLA_S16,
+ * NLA_S32, NLA_S64,
+ * NLA_MSECS Leaving the length field zero will verify the
+ * given type fits, using it verifies minimum length
+ * just like "All other"
+ * All other Minimum length of attribute payload
*
* Example:
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
@@ -210,52 +217,39 @@ struct nla_policy {
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
- * @pid: Netlink PID of requesting application
+ * @portid: Netlink PORTID of requesting application
*/
struct nl_info {
struct nlmsghdr *nlh;
struct net *nl_net;
- u32 pid;
+ u32 portid;
};
-extern int netlink_rcv_skb(struct sk_buff *skb,
- int (*cb)(struct sk_buff *,
- struct nlmsghdr *));
-extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
- u32 pid, unsigned int group, int report,
- gfp_t flags);
-
-extern int nla_validate(const struct nlattr *head,
- int len, int maxtype,
- const struct nla_policy *policy);
-extern int nla_parse(struct nlattr **tb, int maxtype,
- const struct nlattr *head, int len,
- const struct nla_policy *policy);
-extern int nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr * nla_find(const struct nlattr *head,
- int len, int attrtype);
-extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
- size_t dstsize);
-extern int nla_memcpy(void *dest, const struct nlattr *src, int count);
-extern int nla_memcmp(const struct nlattr *nla, const void *data,
- size_t size);
-extern int nla_strcmp(const struct nlattr *nla, const char *str);
-extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype,
- int attrlen);
-extern void * __nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype,
- int attrlen);
-extern void * nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern void __nla_put(struct sk_buff *skb, int attrtype,
- int attrlen, const void *data);
-extern void __nla_put_nohdr(struct sk_buff *skb, int attrlen,
- const void *data);
-extern int nla_put(struct sk_buff *skb, int attrtype,
- int attrlen, const void *data);
-extern int nla_put_nohdr(struct sk_buff *skb, int attrlen,
- const void *data);
-extern int nla_append(struct sk_buff *skb, int attrlen,
- const void *data);
+int netlink_rcv_skb(struct sk_buff *skb,
+ int (*cb)(struct sk_buff *, struct nlmsghdr *));
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+ unsigned int group, int report, gfp_t flags);
+
+int nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy);
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy);
+int nla_policy_len(const struct nla_policy *, int);
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+int nla_memcpy(void *dest, const struct nlattr *src, int count);
+int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
+int nla_strcmp(const struct nlattr *nla, const char *str);
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data);
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_append(struct sk_buff *skb, int attrlen, const void *data);
/**************************************************************************
* Netlink Messages
@@ -290,7 +284,7 @@ static inline int nlmsg_padlen(int payload)
/**
* nlmsg_data - head of message payload
- * @nlh: netlink messsage header
+ * @nlh: netlink message header
*/
static inline void *nlmsg_data(const struct nlmsghdr *nlh)
{
@@ -434,45 +428,10 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
nlmsg_attrlen(nlh, hdrlen), rem)
-#if 0
-/* FIXME: Enable once all users have been converted */
-
-/**
- * __nlmsg_put - Add a new netlink message to an skb
- * @skb: socket buffer to store message in
- * @pid: netlink process id
- * @seq: sequence number of message
- * @type: message type
- * @payload: length of message payload
- * @flags: message flags
- *
- * The caller is responsible to ensure that the skb provides enough
- * tailroom for both the netlink header and payload.
- */
-static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid,
- u32 seq, int type, int payload,
- int flags)
-{
- struct nlmsghdr *nlh;
-
- nlh = (struct nlmsghdr *) skb_put(skb, nlmsg_total_size(payload));
- nlh->nlmsg_type = type;
- nlh->nlmsg_len = nlmsg_msg_size(payload);
- nlh->nlmsg_flags = flags;
- nlh->nlmsg_pid = pid;
- nlh->nlmsg_seq = seq;
-
- memset((unsigned char *) nlmsg_data(nlh) + payload, 0,
- nlmsg_padlen(payload));
-
- return nlh;
-}
-#endif
-
/**
* nlmsg_put - Add a new netlink message to an skb
* @skb: socket buffer to store message in
- * @pid: netlink process id
+ * @portid: netlink process id
* @seq: sequence number of message
* @type: message type
* @payload: length of message payload
@@ -481,13 +440,13 @@ static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid,
* Returns NULL if the tailroom of the skb is insufficient to store
* the message header and payload.
*/
-static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
int type, int payload, int flags)
{
if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
return NULL;
- return __nlmsg_put(skb, pid, seq, type, payload, flags);
+ return __nlmsg_put(skb, portid, seq, type, payload, flags);
}
/**
@@ -506,7 +465,7 @@ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
int type, int payload,
int flags)
{
- return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+ return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
type, payload, flags);
}
@@ -591,18 +550,18 @@ static inline void nlmsg_free(struct sk_buff *skb)
* nlmsg_multicast - multicast a netlink message
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
* @group: multicast group id
* @flags: allocation flags
*/
static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
- u32 pid, unsigned int group, gfp_t flags)
+ u32 portid, unsigned int group, gfp_t flags)
{
int err;
NETLINK_CB(skb).dst_group = group;
- err = netlink_broadcast(sk, skb, pid, group, flags);
+ err = netlink_broadcast(sk, skb, portid, group, flags);
if (err > 0)
err = 0;
@@ -613,13 +572,13 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
* nlmsg_unicast - unicast a netlink message
* @sk: netlink socket to spread message to
* @skb: netlink message as socket buffer
- * @pid: netlink pid of the destination socket
+ * @portid: netlink portid of the destination socket
*/
-static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
+static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
{
int err;
- err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
+ err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -638,6 +597,30 @@ static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
nlmsg_ok(pos, rem); \
pos = nlmsg_next(pos, &(rem)))
+/**
+ * nl_dump_check_consistent - check if sequence is consistent and advertise if not
+ * @cb: netlink callback structure that stores the sequence number
+ * @nlh: netlink message header to write the flag to
+ *
+ * This function checks if the sequence (generation) number changed during dump
+ * and if it did, advertises it in the netlink message header.
+ *
+ * The correct way to use it is to set cb->seq to the generation counter when
+ * all locks for dumping have been acquired, and then call this function for
+ * each message that is generated.
+ *
+ * Note that due to initialisation concerns, 0 is an invalid sequence number
+ * and must not be used by code that uses this functionality.
+ */
+static inline void
+nl_dump_check_consistent(struct netlink_callback *cb,
+ struct nlmsghdr *nlh)
+{
+ if (cb->prev_seq && cb->seq != cb->prev_seq)
+ nlh->nlmsg_flags |= NLM_F_DUMP_INTR;
+ cb->prev_seq = cb->seq;
+}
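A minimal usage sketch (assumed caller, hypothetical message type and generation counter): latch the subsystem's generation counter into cb->seq once the dump locks are held, then call the helper for every message put into the dump skb.

static u32 example_generation_counter;	/* hypothetical per-subsystem counter */

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;

	cb->seq = example_generation_counter;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			0 /* EXAMPLE_MSG_TYPE */, 0, NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* sets NLM_F_DUMP_INTR if the generation changed mid-dump */
	nl_dump_check_consistent(cb, nlh);

	/* ... fill attributes ... */

	nlmsg_end(skb, nlh);
	return skb->len;
}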
+
/**************************************************************************
* Netlink Attributes
**************************************************************************/
@@ -776,6 +759,39 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
}
/**
+ * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+ return nla_put(skb, attrtype, sizeof(__be16), &value);
+}
+
+/**
+ * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+ return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+{
+ return nla_put(skb, attrtype, sizeof(__le16), &value);
+}
+
+/**
* nla_put_u32 - Add a u32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
@@ -787,7 +803,40 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
}
/**
- * nla_put_64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+ return nla_put(skb, attrtype, sizeof(__be32), &value);
+}
+
+/**
+ * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+ return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+{
+ return nla_put(skb, attrtype, sizeof(__le32), &value);
+}
+
+/**
+ * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
@@ -798,6 +847,83 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
}
/**
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+ return nla_put(skb, attrtype, sizeof(__be64), &value);
+}
+
+/**
+ * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+{
+ return nla_put(skb, attrtype, sizeof(__le64), &value);
+}
+
+/**
+ * nla_put_s8 - Add an s8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+{
+ return nla_put(skb, attrtype, sizeof(s8), &value);
+}
+
+/**
+ * nla_put_s16 - Add an s16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+{
+ return nla_put(skb, attrtype, sizeof(s16), &value);
+}
+
+/**
+ * nla_put_s32 - Add an s32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+{
+ return nla_put(skb, attrtype, sizeof(s32), &value);
+}
+
+/**
+ * nla_put_s64 - Add an s64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+{
+ return nla_put(skb, attrtype, sizeof(s64), &value);
+}
+
+/**
* nla_put_string - Add a string netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
@@ -832,51 +958,6 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
return nla_put(skb, attrtype, sizeof(u64), &tmp);
}
-#define NLA_PUT(skb, attrtype, attrlen, data) \
- do { \
- if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
- goto nla_put_failure; \
- } while(0)
-
-#define NLA_PUT_TYPE(skb, type, attrtype, value) \
- do { \
- type __tmp = value; \
- NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
- } while(0)
-
-#define NLA_PUT_U8(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, u8, attrtype, value)
-
-#define NLA_PUT_U16(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, u16, attrtype, value)
-
-#define NLA_PUT_LE16(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __le16, attrtype, value)
-
-#define NLA_PUT_BE16(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be16, attrtype, value)
-
-#define NLA_PUT_U32(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, u32, attrtype, value)
-
-#define NLA_PUT_BE32(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be32, attrtype, value)
-
-#define NLA_PUT_U64(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, u64, attrtype, value)
-
-#define NLA_PUT_BE64(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be64, attrtype, value)
-
-#define NLA_PUT_STRING(skb, attrtype, value) \
- NLA_PUT(skb, attrtype, strlen(value) + 1, value)
-
-#define NLA_PUT_FLAG(skb, attrtype) \
- NLA_PUT(skb, attrtype, 0, NULL)
-
-#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
- NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
-
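With the NLA_PUT_* macros removed, the goto that the macros hid now has to be spelled out at each call site. A sketch of the equivalent open-coded pattern, with a hypothetical attribute id:

        /* Before: NLA_PUT_U32(skb, MY_ATTR_FOO, foo); jumped on failure. */
        if (nla_put_u32(skb, MY_ATTR_FOO, foo))
                goto nla_put_failure;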
/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
@@ -958,6 +1039,46 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
}
/**
+ * nla_get_s32 - return payload of s32 attribute
+ * @nla: s32 netlink attribute
+ */
+static inline s32 nla_get_s32(const struct nlattr *nla)
+{
+ return *(s32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s16 - return payload of s16 attribute
+ * @nla: s16 netlink attribute
+ */
+static inline s16 nla_get_s16(const struct nlattr *nla)
+{
+ return *(s16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s8 - return payload of s8 attribute
+ * @nla: s8 netlink attribute
+ */
+static inline s8 nla_get_s8(const struct nlattr *nla)
+{
+ return *(s8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s64 - return payload of s64 attribute
+ * @nla: s64 netlink attribute
+ */
+static inline s64 nla_get_s64(const struct nlattr *nla)
+{
+ s64 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+
+ return tmp;
+}
+
+/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
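On the receive side the new signed getters mirror the unsigned ones; nla_get_s64() copies through nla_memcpy(), presumably because a 64-bit payload is not guaranteed to be naturally aligned inside the message. A hedged sketch of reading the signed attributes, assuming attrs[] was filled by nla_parse() and that the MY_ATTR_* ids exist:

        if (attrs[MY_ATTR_TEMP_DELTA])
                temp_delta = nla_get_s32(attrs[MY_ATTR_TEMP_DELTA]);
        if (attrs[MY_ATTR_OFFSET_NS])
                offset_ns = nla_get_s64(attrs[MY_ATTR_OFFSET_NS]);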
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index d4958d4c657..773cce308bc 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -3,33 +3,110 @@
#include <linux/list.h>
#include <linux/list_nulls.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/seqlock.h>
struct ctl_table_header;
struct nf_conntrack_ecache;
+struct nf_proto_net {
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *ctl_table_header;
+ struct ctl_table *ctl_table;
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ struct ctl_table_header *ctl_compat_header;
+ struct ctl_table *ctl_compat_table;
+#endif
+#endif
+ unsigned int users;
+};
+
+struct nf_generic_net {
+ struct nf_proto_net pn;
+ unsigned int timeout;
+};
+
+struct nf_tcp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
+ unsigned int tcp_loose;
+ unsigned int tcp_be_liberal;
+ unsigned int tcp_max_retrans;
+};
+
+enum udp_conntrack {
+ UDP_CT_UNREPLIED,
+ UDP_CT_REPLIED,
+ UDP_CT_MAX
+};
+
+struct nf_udp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[UDP_CT_MAX];
+};
+
+struct nf_icmp_net {
+ struct nf_proto_net pn;
+ unsigned int timeout;
+};
+
+struct nf_ip_net {
+ struct nf_generic_net generic;
+ struct nf_tcp_net tcp;
+ struct nf_udp_net udp;
+ struct nf_icmp_net icmp;
+ struct nf_icmp_net icmpv6;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ struct ctl_table_header *ctl_table_header;
+ struct ctl_table *ctl_table;
+#endif
+};
+
+struct ct_pcpu {
+ spinlock_t lock;
+ struct hlist_nulls_head unconfirmed;
+ struct hlist_nulls_head dying;
+ struct hlist_nulls_head tmpl;
+};
+
struct netns_ct {
atomic_t count;
unsigned int expect_count;
- unsigned int htable_size;
- struct kmem_cache *nf_conntrack_cachep;
- struct hlist_nulls_head *hash;
- struct hlist_head *expect_hash;
- struct hlist_nulls_head unconfirmed;
- struct hlist_nulls_head dying;
- struct ip_conntrack_stat __percpu *stat;
- int sysctl_events;
- unsigned int sysctl_events_retry_timeout;
- int sysctl_acct;
- int sysctl_checksum;
- unsigned int sysctl_log_invalid; /* Log invalid packets */
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
+ struct ctl_table_header *tstamp_sysctl_header;
struct ctl_table_header *event_sysctl_header;
+ struct ctl_table_header *helper_sysctl_header;
#endif
- int hash_vmalloc;
- int expect_vmalloc;
char *slabname;
+ unsigned int sysctl_log_invalid; /* Log invalid packets */
+ unsigned int sysctl_events_retry_timeout;
+ int sysctl_events;
+ int sysctl_acct;
+ int sysctl_auto_assign_helper;
+ bool auto_assign_helper_warned;
+ int sysctl_tstamp;
+ int sysctl_checksum;
+
+ unsigned int htable_size;
+ seqcount_t generation;
+ struct kmem_cache *nf_conntrack_cachep;
+ struct hlist_nulls_head *hash;
+ struct hlist_head *expect_hash;
+ struct ct_pcpu __percpu *pcpu_lists;
+ struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
+ struct nf_ip_net nf_ct_proto;
+#if defined(CONFIG_NF_CONNTRACK_LABELS)
+ unsigned int labels_used;
+ u8 label_words;
+#endif
+#ifdef CONFIG_NF_NAT_NEEDED
+ struct hlist_head *nat_bysource;
+ unsigned int nat_htable_size;
+#endif
};
#endif
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 3419bf5cd15..0931618c0f7 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -5,6 +5,7 @@
#ifndef __NET_GENERIC_H__
#define __NET_GENERIC_H__
+#include <linux/bug.h>
#include <linux/rcupdate.h>
/*
@@ -41,6 +42,7 @@ static inline void *net_generic(const struct net *net, int id)
ptr = ng->ptr[id - 1];
rcu_read_unlock();
+ BUG_ON(!ptr);
return ptr;
}
#endif
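The added BUG_ON() turns a lookup of per-net data that was never registered into an immediate failure instead of a NULL dereference somewhere later. A minimal sketch of how a subsystem usually pairs register_pernet_subsys() with net_generic(); the subsystem names here are illustrative:

        struct my_pernet { int counter; };      /* hypothetical per-netns state */
        static int my_net_id __read_mostly;

        static struct pernet_operations my_net_ops = {
                .id   = &my_net_id,
                .size = sizeof(struct my_pernet), /* allocated for each netns */
        };

        /* register_pernet_subsys(&my_net_ops) at init time; afterwards: */
        struct my_pernet *mp = net_generic(net, my_net_id);
        mp->counter++;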
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 548d78f2cc4..c06ac58ca10 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
struct net;
-static inline unsigned net_hash_mix(struct net *net)
+static inline unsigned int net_hash_mix(struct net *net)
{
#ifdef CONFIG_NET_NS
/*
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
new file mode 100644
index 00000000000..e2070960bac
--- /dev/null
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -0,0 +1,22 @@
+/*
+ * ieee802154 6lowpan in net namespaces
+ */
+
+#include <net/inet_frag.h>
+
+#ifndef __NETNS_IEEE802154_6LOWPAN_H__
+#define __NETNS_IEEE802154_6LOWPAN_H__
+
+struct netns_sysctl_lowpan {
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *frags_hdr;
+#endif
+};
+
+struct netns_ieee802154_lowpan {
+ struct netns_sysctl_lowpan sysctl;
+ struct netns_frags frags;
+ int max_dsize;
+};
+
+#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d68c3f12177..aec5e12f9f1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -5,13 +5,25 @@
#ifndef __NETNS_IPV4_H__
#define __NETNS_IPV4_H__
+#include <linux/uidgid.h>
#include <net/inet_frag.h>
+struct tcpm_hash_bucket;
struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
struct hlist_head;
+struct fib_table;
struct sock;
+struct local_ports {
+ seqlock_t lock;
+ int range[2];
+};
+
+struct ping_group_range {
+ seqlock_t lock;
+ kgid_t range[2];
+};
struct netns_ipv4 {
#ifdef CONFIG_SYSCTL
@@ -19,18 +31,27 @@ struct netns_ipv4 {
struct ctl_table_header *frags_hdr;
struct ctl_table_header *ipv4_hdr;
struct ctl_table_header *route_hdr;
+ struct ctl_table_header *xfrm4_hdr;
#endif
struct ipv4_devconf *devconf_all;
struct ipv4_devconf *devconf_dflt;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
+ bool fib_has_custom_rules;
+ struct fib_table *fib_local;
+ struct fib_table *fib_main;
+ struct fib_table *fib_default;
+#endif
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ int fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
struct sock *fibnl;
struct sock **icmp_sk;
- struct sock *tcp_sock;
-
+ struct inet_peer_base *peers;
+ struct tcpm_hash_bucket *tcp_metrics_hash;
+ unsigned int tcp_metrics_hash_log;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *iptable_filter;
@@ -41,9 +62,6 @@ struct netns_ipv4 {
struct xt_table *iptable_security;
#endif
struct xt_table *nat_table;
- struct hlist_head *nat_bysource;
- unsigned int nat_htable_size;
- int nat_vmalloced;
#endif
int sysctl_icmp_echo_ignore_all;
@@ -52,10 +70,23 @@ struct netns_ipv4 {
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
int sysctl_icmp_errors_use_inbound_ifaddr;
- int sysctl_rt_cache_rebuild_count;
- int current_rt_cache_rebuild_count;
- atomic_t rt_genid;
+ struct local_ports ip_local_ports;
+
+ int sysctl_tcp_ecn;
+ int sysctl_ip_no_pmtu_disc;
+ int sysctl_ip_fwd_use_pmtu;
+
+ int sysctl_fwmark_reflect;
+ int sysctl_tcp_fwmark_accept;
+
+ struct ping_group_range ping_group_range;
+
+ atomic_t dev_addr_genid;
+
+#ifdef CONFIG_SYSCTL
+ unsigned long *sysctl_local_reserved_ports;
+#endif
#ifdef CONFIG_IP_MROUTE
#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -65,5 +96,6 @@ struct netns_ipv4 {
struct fib_rules_ops *mr_rules_ops;
#endif
#endif
+ atomic_t rt_genid;
};
#endif
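local_ports and ping_group_range both pair a seqlock_t with a two-element range so readers never block the (rare) sysctl writers. A hedged sketch of the usual seqlock read loop over the port range; the helper name is illustrative and only mirrors the common kernel pattern:

        static void my_get_local_port_range(struct net *net, int *low, int *high)
        {
                unsigned int seq;

                do {
                        seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
                        *low  = net->ipv4.ip_local_ports.range[0];
                        *high = net->ipv4.ip_local_ports.range[1];
                } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
        }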
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 81abfcb2eb4..19d3446e59d 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -12,8 +12,11 @@ struct ctl_table_header;
struct netns_sysctl_ipv6 {
#ifdef CONFIG_SYSCTL
- struct ctl_table_header *table;
+ struct ctl_table_header *hdr;
+ struct ctl_table_header *route_hdr;
+ struct ctl_table_header *icmp_hdr;
struct ctl_table_header *frags_hdr;
+ struct ctl_table_header *xfrm6_hdr;
#endif
int bindv6only;
int flush_delay;
@@ -24,13 +27,17 @@ struct netns_sysctl_ipv6 {
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
+ int flowlabel_consistency;
int icmpv6_time;
+ int anycast_src_echo_reply;
+ int fwmark_reflect;
};
struct netns_ipv6 {
struct netns_sysctl_ipv6 sysctl;
struct ipv6_devconf *devconf_all;
struct ipv6_devconf *devconf_dflt;
+ struct inet_peer_base *peers;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *ip6table_filter;
@@ -39,6 +46,7 @@ struct netns_ipv6 {
#ifdef CONFIG_SECURITY
struct xt_table *ip6table_security;
#endif
+ struct xt_table *ip6table_nat;
#endif
struct rt6_info *ip6_null_entry;
struct rt6_statistics *rt6_stats;
@@ -66,5 +74,15 @@ struct netns_ipv6 {
struct fib_rules_ops *mr6_rules_ops;
#endif
#endif
+ atomic_t dev_addr_genid;
+ atomic_t rt_genid;
};
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+struct netns_nf_frag {
+ struct netns_sysctl_ipv6 sysctl;
+ struct netns_frags frags;
+};
+#endif
+
#endif
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 0b44112e236..d542a4b28cc 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -10,15 +10,15 @@ struct netns_mib {
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
- DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct proc_dir_entry *proc_net_devsnmp6;
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
- DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
#endif
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
new file mode 100644
index 00000000000..88740024ccf
--- /dev/null
+++ b/include/net/netns/netfilter.h
@@ -0,0 +1,18 @@
+#ifndef __NETNS_NETFILTER_H
+#define __NETNS_NETFILTER_H
+
+#include <linux/proc_fs.h>
+#include <linux/netfilter.h>
+
+struct nf_logger;
+
+struct netns_nf {
+#if defined CONFIG_PROC_FS
+ struct proc_dir_entry *proc_netfilter;
+#endif
+ const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *nf_log_dir_header;
+#endif
+};
+#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
new file mode 100644
index 00000000000..eee608b12cc
--- /dev/null
+++ b/include/net/netns/nftables.h
@@ -0,0 +1,20 @@
+#ifndef _NETNS_NFTABLES_H_
+#define _NETNS_NFTABLES_H_
+
+#include <linux/list.h>
+
+struct nft_af_info;
+
+struct netns_nftables {
+ struct list_head af_info;
+ struct list_head commit_list;
+ struct nft_af_info *ipv4;
+ struct nft_af_info *ipv6;
+ struct nft_af_info *inet;
+ struct nft_af_info *arp;
+ struct nft_af_info *bridge;
+ unsigned int base_seq;
+ u8 gencursor;
+};
+
+#endif
diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h
index cb4e894c0f8..17ec2b95c06 100644
--- a/include/net/netns/packet.h
+++ b/include/net/netns/packet.h
@@ -5,10 +5,10 @@
#define __NETNS_PACKET_H__
#include <linux/rculist.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
struct netns_packet {
- spinlock_t sklist_lock;
+ struct mutex sklist_lock;
struct hlist_head sklist;
};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644
index 00000000000..3573a81815a
--- /dev/null
+++ b/include/net/netns/sctp.h
@@ -0,0 +1,134 @@
+#ifndef __NETNS_SCTP_H__
+#define __NETNS_SCTP_H__
+
+struct sock;
+struct proc_dir_entry;
+struct sctp_mib;
+struct ctl_table_header;
+
+struct netns_sctp {
+ DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_net_sctp;
+#endif
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+#endif
+ /* This is the global socket data structure used for responding to
+ * Out-of-the-blue (OOTB) packets. A control sock will be created
+ * for this socket at initialization time.
+ */
+ struct sock *ctl_sock;
+
+ /* This is the global local address list.
+ * We actively maintain this complete list of addresses on
+ * the system by catching address add/delete events.
+ *
+ * It is a list of sctp_sockaddr_entry.
+ */
+ struct list_head local_addr_list;
+ struct list_head addr_waitq;
+ struct timer_list addr_wq_timer;
+ struct list_head auto_asconf_splist;
+ spinlock_t addr_wq_lock;
+
+ /* Lock that protects the local_addr_list writers */
+ spinlock_t local_addr_lock;
+
+ /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+ *
+ * The following protocol parameters are RECOMMENDED:
+ *
+ * RTO.Initial - 3 seconds
+ * RTO.Min - 1 second
+ * RTO.Max - 60 seconds
+ * RTO.Alpha - 1/8 (3 when converted to right shifts.)
+ * RTO.Beta - 1/4 (2 when converted to right shifts.)
+ */
+ unsigned int rto_initial;
+ unsigned int rto_min;
+ unsigned int rto_max;
+
+ /* Note: rto_alpha and rto_beta are really defined as inverse
+ * powers of two to facilitate integer operations.
+ */
+ int rto_alpha;
+ int rto_beta;
+
+ /* Max.Burst - 4 */
+ int max_burst;
+
+ /* Whether Cookie Preservative is enabled(1) or not(0) */
+ int cookie_preserve_enable;
+
+ /* The namespace default hmac alg */
+ char *sctp_hmac_alg;
+
+ /* Valid.Cookie.Life - 60 seconds */
+ unsigned int valid_cookie_life;
+
+ /* Delayed SACK timeout, 200ms default */
+ unsigned int sack_timeout;
+
+ /* HB.interval - 30 seconds */
+ unsigned int hb_interval;
+
+ /* Association.Max.Retrans - 10 attempts
+ * Path.Max.Retrans - 5 attempts (per destination address)
+ * Max.Init.Retransmits - 8 attempts
+ */
+ int max_retrans_association;
+ int max_retrans_path;
+ int max_retrans_init;
+ /* Potentially-Failed.Max.Retrans sysctl value
+ * taken from:
+ * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+ */
+ int pf_retrans;
+
+ /*
+ * Policy for performing sctp/socket accounting
+ * 0 - do socket level accounting, all assocs share sk_sndbuf
+ * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
+ */
+ int sndbuf_policy;
+
+ /*
+ * Policy for performing sctp/socket accounting
+ * 0 - do socket level accounting, all assocs share sk_rcvbuf
+ * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
+ */
+ int rcvbuf_policy;
+
+ int default_auto_asconf;
+
+ /* Flag to indicate if addip is enabled. */
+ int addip_enable;
+ int addip_noauth;
+
+ /* Flag to indicate if PR-SCTP is enabled. */
+ int prsctp_enable;
+
+ /* Flag to indicate if SCTP-AUTH is enabled */
+ int auth_enable;
+
+ /*
+ * Policy to control SCTP IPv4 address scoping
+ * 0 - Disable IPv4 address scoping
+ * 1 - Enable IPv4 address scoping
+ * 2 - Selectively allow only IPv4 private addresses
+ * 3 - Selectively allow only IPv4 link local address
+ */
+ int scope_policy;
+
+ /* Threshold for rwnd update SACKs. Receive buffer shifted this many
+ * bits is an indicator of when to send a window update SACK.
+ */
+ int rwnd_upd_shift;
+
+ /* Threshold for autoclose timeout, in seconds. */
+ unsigned long max_autoclose;
+};
+
+#endif /* __NETNS_SCTP_H__ */
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index 591db7d657a..02fe40f8c8f 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -8,11 +8,18 @@ struct ebt_table;
struct netns_xt {
struct list_head tables[NFPROTO_NUMPROTO];
+ bool notrack_deprecated_warning;
#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
struct ebt_table *broute_table;
struct ebt_table *frame_filter;
struct ebt_table *frame_nat;
#endif
+#if IS_ENABLED(CONFIG_IP_NF_TARGET_ULOG)
+ bool ulog_warn_deprecated;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_EBT_ULOG)
+ bool ebt_ulog_warn_deprecated;
+#endif
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 748f91f87cd..3492434baf8 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -6,6 +6,7 @@
#include <linux/workqueue.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
+#include <net/flowcache.h>
struct ctl_table_header;
@@ -33,8 +34,6 @@ struct netns_xfrm {
struct hlist_head state_gc_list;
struct work_struct state_gc_work;
- wait_queue_head_t km_waitq;
-
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
@@ -56,9 +55,21 @@ struct netns_xfrm {
#endif
struct dst_ops xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
+ spinlock_t xfrm_state_lock;
+ rwlock_t xfrm_policy_lock;
+ struct mutex xfrm_cfg_mutex;
+
+ /* flow cache part */
+ struct flow_cache flow_cache_global;
+ atomic_t flow_cache_genid;
+ struct list_head flow_cache_gc_list;
+ spinlock_t flow_cache_gc_lock;
+ struct work_struct flow_cache_gc_work;
+ struct work_struct flow_cache_flush_work;
+ struct mutex flow_flush_sem;
};
#endif
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644
index 00000000000..f2a9597ff53
--- /dev/null
+++ b/include/net/netprio_cgroup.h
@@ -0,0 +1,50 @@
+/*
+ * netprio_cgroup.h Control Group Priority set
+ *
+ *
+ * Authors: Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _NETPRIO_CGROUP_H
+#define _NETPRIO_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+struct netprio_map {
+ struct rcu_head rcu;
+ u32 priomap_len;
+ u32 priomap[];
+};
+
+void sock_update_netprioidx(struct sock *sk);
+
+static inline u32 task_netprioidx(struct task_struct *p)
+{
+ struct cgroup_subsys_state *css;
+ u32 idx;
+
+ rcu_read_lock();
+ css = task_css(p, net_prio_cgrp_id);
+ idx = css->cgroup->id;
+ rcu_read_unlock();
+ return idx;
+}
+#else /* !CONFIG_CGROUP_NET_PRIO */
+static inline u32 task_netprioidx(struct task_struct *p)
+{
+ return 0;
+}
+
+#define sock_update_netprioidx(sk)
+
+#endif /* CONFIG_CGROUP_NET_PRIO */
+#endif /* _NETPRIO_CGROUP_H */
diff --git a/include/net/netrom.h b/include/net/netrom.h
index f0793c1cb5f..110350aca3d 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -154,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node)
nr_node_put(nr_node);
}
-#define nr_neigh_for_each(__nr_neigh, node, list) \
- hlist_for_each_entry(__nr_neigh, node, list, neigh_node)
+#define nr_neigh_for_each(__nr_neigh, list) \
+ hlist_for_each_entry(__nr_neigh, list, neigh_node)
-#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \
- hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node)
+#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
+ hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
-#define nr_node_for_each(__nr_node, node, list) \
- hlist_for_each_entry(__nr_node, node, list, node_node)
+#define nr_node_for_each(__nr_node, list) \
+ hlist_for_each_entry(__nr_node, list, node_node)
-#define nr_node_for_each_safe(__nr_node, node, node2, list) \
- hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node)
+#define nr_node_for_each_safe(__nr_node, node2, list) \
+ hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
/*********************************************************************/
@@ -183,51 +183,50 @@ extern int sysctl_netrom_routing_control;
extern int sysctl_netrom_link_fails_count;
extern int sysctl_netrom_reset_circuit;
-extern int nr_rx_frame(struct sk_buff *, struct net_device *);
-extern void nr_destroy_socket(struct sock *);
+int nr_rx_frame(struct sk_buff *, struct net_device *);
+void nr_destroy_socket(struct sock *);
/* nr_dev.c */
-extern int nr_rx_ip(struct sk_buff *, struct net_device *);
-extern void nr_setup(struct net_device *);
+int nr_rx_ip(struct sk_buff *, struct net_device *);
+void nr_setup(struct net_device *);
/* nr_in.c */
-extern int nr_process_rx_frame(struct sock *, struct sk_buff *);
+int nr_process_rx_frame(struct sock *, struct sk_buff *);
/* nr_loopback.c */
-extern void nr_loopback_init(void);
-extern void nr_loopback_clear(void);
-extern int nr_loopback_queue(struct sk_buff *);
+void nr_loopback_init(void);
+void nr_loopback_clear(void);
+int nr_loopback_queue(struct sk_buff *);
/* nr_out.c */
-extern void nr_output(struct sock *, struct sk_buff *);
-extern void nr_send_nak_frame(struct sock *);
-extern void nr_kick(struct sock *);
-extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
-extern void nr_establish_data_link(struct sock *);
-extern void nr_enquiry_response(struct sock *);
-extern void nr_check_iframes_acked(struct sock *, unsigned short);
+void nr_output(struct sock *, struct sk_buff *);
+void nr_send_nak_frame(struct sock *);
+void nr_kick(struct sock *);
+void nr_transmit_buffer(struct sock *, struct sk_buff *);
+void nr_establish_data_link(struct sock *);
+void nr_enquiry_response(struct sock *);
+void nr_check_iframes_acked(struct sock *, unsigned short);
/* nr_route.c */
-extern void nr_rt_device_down(struct net_device *);
-extern struct net_device *nr_dev_first(void);
-extern struct net_device *nr_dev_get(ax25_address *);
-extern int nr_rt_ioctl(unsigned int, void __user *);
-extern void nr_link_failed(ax25_cb *, int);
-extern int nr_route_frame(struct sk_buff *, ax25_cb *);
+void nr_rt_device_down(struct net_device *);
+struct net_device *nr_dev_first(void);
+struct net_device *nr_dev_get(ax25_address *);
+int nr_rt_ioctl(unsigned int, void __user *);
+void nr_link_failed(ax25_cb *, int);
+int nr_route_frame(struct sk_buff *, ax25_cb *);
extern const struct file_operations nr_nodes_fops;
extern const struct file_operations nr_neigh_fops;
-extern void nr_rt_free(void);
+void nr_rt_free(void);
/* nr_subr.c */
-extern void nr_clear_queues(struct sock *);
-extern void nr_frames_acked(struct sock *, unsigned short);
-extern void nr_requeue_frames(struct sock *);
-extern int nr_validate_nr(struct sock *, unsigned short);
-extern int nr_in_rx_window(struct sock *, unsigned short);
-extern void nr_write_internal(struct sock *, int);
+void nr_clear_queues(struct sock *);
+void nr_frames_acked(struct sock *, unsigned short);
+void nr_requeue_frames(struct sock *);
+int nr_validate_nr(struct sock *, unsigned short);
+int nr_in_rx_window(struct sock *, unsigned short);
+void nr_write_internal(struct sock *, int);
-extern void __nr_transmit_reply(struct sk_buff *skb, int mine,
- unsigned char cmdflags);
+void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
/*
* This routine is called when a Connect Acknowledge with the Choke Flag
@@ -247,24 +246,24 @@ do { \
__nr_transmit_reply((skb), (mine), NR_RESET); \
} while (0)
-extern void nr_disconnect(struct sock *, int);
+void nr_disconnect(struct sock *, int);
/* nr_timer.c */
-extern void nr_init_timers(struct sock *sk);
-extern void nr_start_heartbeat(struct sock *);
-extern void nr_start_t1timer(struct sock *);
-extern void nr_start_t2timer(struct sock *);
-extern void nr_start_t4timer(struct sock *);
-extern void nr_start_idletimer(struct sock *);
-extern void nr_stop_heartbeat(struct sock *);
-extern void nr_stop_t1timer(struct sock *);
-extern void nr_stop_t2timer(struct sock *);
-extern void nr_stop_t4timer(struct sock *);
-extern void nr_stop_idletimer(struct sock *);
-extern int nr_t1timer_running(struct sock *);
+void nr_init_timers(struct sock *sk);
+void nr_start_heartbeat(struct sock *);
+void nr_start_t1timer(struct sock *);
+void nr_start_t2timer(struct sock *);
+void nr_start_t4timer(struct sock *);
+void nr_start_idletimer(struct sock *);
+void nr_stop_heartbeat(struct sock *);
+void nr_stop_t1timer(struct sock *);
+void nr_stop_t2timer(struct sock *);
+void nr_stop_t4timer(struct sock *);
+void nr_stop_idletimer(struct sock *);
+int nr_t1timer_running(struct sock *);
/* sysctl_net_netrom.c */
-extern void nr_register_sysctl(void);
-extern void nr_unregister_sysctl(void);
+void nr_register_sysctl(void);
+void nr_unregister_sysctl(void);
#endif
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
new file mode 100644
index 00000000000..bdf55c3b7a1
--- /dev/null
+++ b/include/net/nfc/digital.h
@@ -0,0 +1,248 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __NFC_DIGITAL_H
+#define __NFC_DIGITAL_H
+
+#include <linux/skbuff.h>
+#include <net/nfc/nfc.h>
+
+/**
+ * Configuration types for in_configure_hw and tg_configure_hw.
+ */
+enum {
+ NFC_DIGITAL_CONFIG_RF_TECH = 0,
+ NFC_DIGITAL_CONFIG_FRAMING,
+};
+
+/**
+ * RF technology values passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_RF_TECH configuration type.
+ */
+enum {
+ NFC_DIGITAL_RF_TECH_106A = 0,
+ NFC_DIGITAL_RF_TECH_212F,
+ NFC_DIGITAL_RF_TECH_424F,
+ NFC_DIGITAL_RF_TECH_ISO15693,
+ NFC_DIGITAL_RF_TECH_106B,
+
+ NFC_DIGITAL_RF_TECH_LAST,
+};
+
+/**
+ * Framing configuration passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_FRAMING configuration type.
+ */
+enum {
+ NFC_DIGITAL_FRAMING_NFCA_SHORT = 0,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A,
+
+ NFC_DIGITAL_FRAMING_NFCA_T1T,
+ NFC_DIGITAL_FRAMING_NFCA_T2T,
+ NFC_DIGITAL_FRAMING_NFCA_T4T,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP,
+
+ NFC_DIGITAL_FRAMING_NFCF,
+ NFC_DIGITAL_FRAMING_NFCF_T3T,
+ NFC_DIGITAL_FRAMING_NFCF_NFC_DEP,
+ NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED,
+
+ NFC_DIGITAL_FRAMING_ISO15693_INVENTORY,
+ NFC_DIGITAL_FRAMING_ISO15693_T5T,
+
+ NFC_DIGITAL_FRAMING_NFCB,
+ NFC_DIGITAL_FRAMING_NFCB_T4T,
+
+ NFC_DIGITAL_FRAMING_LAST,
+};
+
+#define DIGITAL_MDAA_NFCID1_SIZE 3
+
+struct digital_tg_mdaa_params {
+ u16 sens_res;
+ u8 nfcid1[DIGITAL_MDAA_NFCID1_SIZE];
+ u8 sel_res;
+
+ u8 nfcid2[NFC_NFCID2_MAXSIZE];
+ u16 sc;
+};
+
+struct nfc_digital_dev;
+
+/**
+ * nfc_digital_cmd_complete_t - Definition of command result callback
+ *
+ * @ddev: nfc_digital_device ref
+ * @arg: user data
+ * @resp: response data
+ *
+ * The resp pointer can be an error code and will be checked with the IS_ERR()
+ * macro. The callback is responsible for freeing the resp sk_buff.
+ */
+typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev,
+ void *arg, struct sk_buff *resp);
+
+/**
+ * Device side NFC Digital operations
+ *
+ * Initiator mode:
+ * @in_configure_hw: Hardware configuration for RF technology and communication
+ * framing in initiator mode. This is a synchronous function.
+ * @in_send_cmd: Initiator mode data exchange using RF technology and framing
+ * previously set with in_configure_hw. The peer response is returned
+ * through callback cb. If an io error occurs or the peer didn't reply
+ * within the specified timeout (ms), the error code is passed back through
+ * the resp pointer. This is an asynchronous function.
+ *
+ * Target mode: Only NFC-DEP protocol is supported in target mode.
+ * @tg_configure_hw: Hardware configuration for RF technology and communication
+ * framing in target mode. This is a synchronous function.
+ * @tg_send_cmd: Target mode data exchange using RF technology and framing
+ * previously set with tg_configure_hw. The peer next command is returned
+ * through callback cb. If an io error occurs or the peer didn't reply
+ * within the specified timeout (ms), the error code is passed back through
+ * the resp pointer. This is an asynchronous function.
+ * @tg_listen: Put the device in listen mode waiting for data from the peer
+ * device. This is an asynchronous function.
+ * @tg_listen_mdaa: If supported, put the device in automatic listen mode with
+ * mode detection and automatic anti-collision. In this mode, the device
+ * automatically detects the RF technology and executes the anti-collision
+ * detection using the command responses specified in mdaa_params. The
+ * mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for the 106A RF
+ * tech, and NFCID2 and system code (sc) for 212F and 424F. The driver returns
+ * the NFC-DEP ATR_REQ command through cb. The digital stack deduces the RF
+ * tech by analyzing the SoD of the frame containing the ATR_REQ command.
+ * This is an asynchronous function.
+ *
+ * @switch_rf: Turns the device radio on or off. The stack does not explicitly
+ * call switch_rf to turn the radio on. A call to in|tg_configure_hw must turn
+ * the device radio on.
+ * @abort_cmd: Discard the last sent command.
+ *
+ * Notes: Asynchronous functions have a timeout parameter. It is the driver's
+ * responsibility to call the digital stack back through the
+ * nfc_digital_cmd_complete_t callback when no RF response has been
+ * received within the specified time (in milliseconds). In that case the
+ * driver must set the resp sk_buff to ERR_PTR(-ETIMEDOUT).
+ * Since the digital stack serializes commands to be sent, it's mandatory
+ * for the driver to handle the timeout correctly. Otherwise the stack
+ * would not be able to send new commands while waiting for the reply to the
+ * current one.
+ */
+struct nfc_digital_ops {
+ int (*in_configure_hw)(struct nfc_digital_dev *ddev, int type,
+ int param);
+ int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+
+ int (*tg_configure_hw)(struct nfc_digital_dev *ddev, int type,
+ int param);
+ int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+ int (*tg_listen)(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg);
+ int (*tg_listen_mdaa)(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *mdaa_params,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+
+ int (*switch_rf)(struct nfc_digital_dev *ddev, bool on);
+ void (*abort_cmd)(struct nfc_digital_dev *ddev);
+};
+
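Per the notes above, when a command times out it is the driver, not the stack, that must complete it, and it must do so with an error pointer. A hedged sketch of a driver's timeout path; the driver structure and its fields are hypothetical:

        static void my_drv_cmd_timeout(struct my_drv *drv)
        {
                /* No RF response within the requested time: hand the digital
                 * stack an ERR_PTR instead of a real sk_buff. */
                drv->cur_cb(drv->ddev, drv->cur_cb_arg, ERR_PTR(-ETIMEDOUT));
        }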
+#define NFC_DIGITAL_POLL_MODE_COUNT_MAX 6 /* 106A, 212F, and 424F in & tg */
+
+typedef int (*digital_poll_t)(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+struct digital_poll_tech {
+ u8 rf_tech;
+ digital_poll_t poll_func;
+};
+
+/**
+ * Driver capabilities - bit mask made of the following values
+ *
+ * @NFC_DIGITAL_DRV_CAPS_IN_CRC: The driver handles CRC calculation in initiator
+ * mode.
+ * @NFC_DIGITAL_DRV_CAPS_TG_CRC: The driver handles CRC calculation in target
+ * mode.
+ */
+#define NFC_DIGITAL_DRV_CAPS_IN_CRC 0x0001
+#define NFC_DIGITAL_DRV_CAPS_TG_CRC 0x0002
+
+struct nfc_digital_dev {
+ struct nfc_dev *nfc_dev;
+ struct nfc_digital_ops *ops;
+
+ u32 protocols;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ u32 driver_capabilities;
+ void *driver_data;
+
+ struct digital_poll_tech poll_techs[NFC_DIGITAL_POLL_MODE_COUNT_MAX];
+ u8 poll_tech_count;
+ u8 poll_tech_index;
+ struct mutex poll_lock;
+
+ struct work_struct cmd_work;
+ struct work_struct cmd_complete_work;
+ struct list_head cmd_queue;
+ struct mutex cmd_lock;
+
+ struct work_struct poll_work;
+
+ u8 curr_protocol;
+ u8 curr_rf_tech;
+ u8 curr_nfc_dep_pni;
+
+ u16 target_fsc;
+
+ int (*skb_check_crc)(struct sk_buff *skb);
+ void (*skb_add_crc)(struct sk_buff *skb);
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+ __u32 supported_protocols,
+ __u32 driver_capabilities,
+ int tx_headroom,
+ int tx_tailroom);
+void nfc_digital_free_device(struct nfc_digital_dev *ndev);
+int nfc_digital_register_device(struct nfc_digital_dev *ndev);
+void nfc_digital_unregister_device(struct nfc_digital_dev *ndev);
+
+static inline void nfc_digital_set_parent_dev(struct nfc_digital_dev *ndev,
+ struct device *dev)
+{
+ nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nfc_digital_set_drvdata(struct nfc_digital_dev *dev,
+ void *data)
+{
+ dev->driver_data = data;
+}
+
+static inline void *nfc_digital_get_drvdata(struct nfc_digital_dev *dev)
+{
+ return dev->driver_data;
+}
+
+#endif /* __NFC_DIGITAL_H */
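Putting the pieces together, a driver built on this header fills an nfc_digital_ops table, allocates and registers a device, and stashes its private state with the drvdata helpers. A minimal, hypothetical registration sketch; the callback names, protocol mask, and error handling are placeholders:

        static struct nfc_digital_ops my_digital_ops = {
                .in_configure_hw = my_in_configure_hw,
                .in_send_cmd     = my_in_send_cmd,
                .tg_configure_hw = my_tg_configure_hw,
                .tg_send_cmd     = my_tg_send_cmd,
                .tg_listen       = my_tg_listen,
                .switch_rf       = my_switch_rf,
                .abort_cmd       = my_abort_cmd,
        };

        ddev = nfc_digital_allocate_device(&my_digital_ops, protocols,
                                           NFC_DIGITAL_DRV_CAPS_IN_CRC, 0, 0);
        if (!ddev)
                return -ENOMEM;
        nfc_digital_set_drvdata(ddev, drv);
        err = nfc_digital_register_device(ddev);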
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
new file mode 100644
index 00000000000..61286db5438
--- /dev/null
+++ b/include/net/nfc/hci.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NET_HCI_H
+#define __NET_HCI_H
+
+#include <linux/skbuff.h>
+
+#include <net/nfc/nfc.h>
+
+struct nfc_hci_dev;
+
+struct nfc_hci_ops {
+ int (*open) (struct nfc_hci_dev *hdev);
+ void (*close) (struct nfc_hci_dev *hdev);
+ int (*load_session) (struct nfc_hci_dev *hdev);
+ int (*hci_ready) (struct nfc_hci_dev *hdev);
+ /*
+ * xmit must always send the complete buffer before
+ * returning. Returned result must be 0 for success
+ * or negative for failure.
+ */
+ int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+ int (*start_poll) (struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols);
+ int (*dep_link_up)(struct nfc_hci_dev *hdev, struct nfc_target *target,
+ u8 comm_mode, u8 *gb, size_t gb_len);
+ int (*dep_link_down)(struct nfc_hci_dev *hdev);
+ int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target);
+ int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target);
+ int (*im_transceive) (struct nfc_hci_dev *hdev,
+ struct nfc_target *target, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context);
+ int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+ int (*check_presence)(struct nfc_hci_dev *hdev,
+ struct nfc_target *target);
+ int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ struct sk_buff *skb);
+ int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
+ int (*discover_se)(struct nfc_hci_dev *dev);
+ int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+ int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+};
+
+/* Pipes */
+#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_LINK_MGMT_PIPE 0x00
+#define NFC_HCI_ADMIN_PIPE 0x01
+
+struct nfc_hci_gate {
+ u8 gate;
+ u8 pipe;
+};
+
+#define NFC_HCI_MAX_CUSTOM_GATES 50
+struct nfc_hci_init_data {
+ u8 gate_count;
+ struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
+ char session_id[9];
+};
+
+typedef int (*xmit) (struct sk_buff *skb, void *cb_data);
+
+#define NFC_HCI_MAX_GATES 256
+
+/*
+ * These values can be specified by a driver to indicate it requires some
+ * adaptation of the HCI standard.
+ *
+ * NFC_HCI_QUIRK_SHORT_CLEAR - send HCI_ADM_CLEAR_ALL_PIPE cmd with no params
+ */
+enum {
+ NFC_HCI_QUIRK_SHORT_CLEAR = 0,
+};
+
+struct nfc_hci_dev {
+ struct nfc_dev *ndev;
+
+ u32 max_data_link_payload;
+
+ bool shutting_down;
+
+ struct mutex msg_tx_mutex;
+
+ struct list_head msg_tx_queue;
+
+ struct work_struct msg_tx_work;
+
+ struct timer_list cmd_timer;
+ struct hci_msg *cmd_pending_msg;
+
+ struct sk_buff_head rx_hcp_frags;
+
+ struct work_struct msg_rx_work;
+
+ struct sk_buff_head msg_rx_queue;
+
+ struct nfc_hci_ops *ops;
+
+ struct nfc_llc *llc;
+
+ struct nfc_hci_init_data init_data;
+
+ void *clientdata;
+
+ u8 gate2pipe[NFC_HCI_MAX_GATES];
+
+ u8 sw_romlib;
+ u8 sw_patch;
+ u8 sw_flashlib_major;
+ u8 sw_flashlib_minor;
+
+ u8 hw_derivative;
+ u8 hw_version;
+ u8 hw_mpw;
+ u8 hw_software;
+ u8 hw_bsid;
+
+ int async_cb_type;
+ data_exchange_cb_t async_cb;
+ void *async_cb_context;
+
+ u8 *gb;
+ size_t gb_len;
+
+ unsigned long quirks;
+};
+
+/* hci device allocation */
+struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
+ struct nfc_hci_init_data *init_data,
+ unsigned long quirks,
+ u32 protocols,
+ const char *llc_name,
+ int tx_headroom,
+ int tx_tailroom,
+ int max_link_payload);
+void nfc_hci_free_device(struct nfc_hci_dev *hdev);
+
+int nfc_hci_register_device(struct nfc_hci_dev *hdev);
+void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
+
+void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
+void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
+
+void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
+
+int nfc_hci_result_to_errno(u8 result);
+
+/* Host IDs */
+#define NFC_HCI_HOST_CONTROLLER_ID 0x00
+#define NFC_HCI_TERMINAL_HOST_ID 0x01
+#define NFC_HCI_UICC_HOST_ID 0x02
+
+/* Host Controller Gates and registry indexes */
+#define NFC_HCI_ADMIN_GATE 0x00
+#define NFC_HCI_ADMIN_SESSION_IDENTITY 0x01
+#define NFC_HCI_ADMIN_MAX_PIPE 0x02
+#define NFC_HCI_ADMIN_WHITELIST 0x03
+#define NFC_HCI_ADMIN_HOST_LIST 0x04
+
+#define NFC_HCI_LOOPBACK_GATE 0x04
+
+#define NFC_HCI_ID_MGMT_GATE 0x05
+#define NFC_HCI_ID_MGMT_VERSION_SW 0x01
+#define NFC_HCI_ID_MGMT_VERSION_HW 0x03
+#define NFC_HCI_ID_MGMT_VENDOR_NAME 0x04
+#define NFC_HCI_ID_MGMT_MODEL_ID 0x05
+#define NFC_HCI_ID_MGMT_HCI_VERSION 0x02
+#define NFC_HCI_ID_MGMT_GATES_LIST 0x06
+
+#define NFC_HCI_LINK_MGMT_GATE 0x06
+#define NFC_HCI_LINK_MGMT_REC_ERROR 0x01
+
+#define NFC_HCI_RF_READER_B_GATE 0x11
+#define NFC_HCI_RF_READER_B_PUPI 0x03
+#define NFC_HCI_RF_READER_B_APPLICATION_DATA 0x04
+#define NFC_HCI_RF_READER_B_AFI 0x02
+#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE 0x01
+#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA 0x05
+
+#define NFC_HCI_RF_READER_A_GATE 0x13
+#define NFC_HCI_RF_READER_A_UID 0x02
+#define NFC_HCI_RF_READER_A_ATQA 0x04
+#define NFC_HCI_RF_READER_A_APPLICATION_DATA 0x05
+#define NFC_HCI_RF_READER_A_SAK 0x03
+#define NFC_HCI_RF_READER_A_FWI_SFGT 0x06
+#define NFC_HCI_RF_READER_A_DATARATE_MAX 0x01
+
+#define NFC_HCI_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5)
+#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE 0
+#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443 1
+#define NFC_HCI_TYPE_A_SEL_PROT_DEP 2
+#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP 3
+
+/* Generic events */
+#define NFC_HCI_EVT_HCI_END_OF_OPERATION 0x01
+#define NFC_HCI_EVT_POST_DATA 0x02
+#define NFC_HCI_EVT_HOT_PLUG 0x03
+
+/* Reader RF gates events */
+#define NFC_HCI_EVT_READER_REQUESTED 0x10
+#define NFC_HCI_EVT_END_OPERATION 0x11
+
+/* Reader Application gate events */
+#define NFC_HCI_EVT_TARGET_DISCOVERED 0x10
+
+/* receiving messages from lower layer */
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb);
+void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+ struct sk_buff *skb);
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+
+/* connecting to gates and sending hci instructions */
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+ u8 pipe);
+int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate);
+int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev);
+int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ struct sk_buff **skb);
+int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len);
+int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len, struct sk_buff **skb);
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len,
+ data_exchange_cb_t cb, void *cb_context);
+int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
+ const u8 *param, size_t param_len);
+int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ const u8 *param, size_t param_len);
+int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate);
+u32 nfc_hci_sak_to_protocol(u8 sak);
+
+#endif /* __NET_HCI_H */
diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h
new file mode 100644
index 00000000000..c25fbdee0d6
--- /dev/null
+++ b/include/net/nfc/llc.h
@@ -0,0 +1,52 @@
+/*
+ * Link Layer Control manager public interface
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NFC_LLC_H_
+#define __NFC_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <linux/skbuff.h>
+
+#define LLC_NOP_NAME "nop"
+#define LLC_SHDLC_NAME "shdlc"
+
+typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef void (*llc_failure_t) (struct nfc_hci_dev *hdev, int err);
+
+struct nfc_llc;
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+ xmit_to_drv_t xmit_to_drv,
+ rcv_to_hci_t rcv_to_hci, int tx_headroom,
+ int tx_tailroom, llc_failure_t llc_failure);
+void nfc_llc_free(struct nfc_llc *llc);
+
+void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
+ int *rx_tailroom);
+
+
+int nfc_llc_start(struct nfc_llc *llc);
+int nfc_llc_stop(struct nfc_llc *llc);
+void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb);
+int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb);
+
+int nfc_llc_init(void);
+void nfc_llc_exit(void);
+
+#endif /* __NFC_LLC_H_ */
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
new file mode 100644
index 00000000000..fbfa4e471ab
--- /dev/null
+++ b/include/net/nfc/nci.h
@@ -0,0 +1,396 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci.h, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __NCI_H
+#define __NCI_H
+
+/* NCI constants */
+#define NCI_MAX_NUM_MAPPING_CONFIGS 10
+#define NCI_MAX_NUM_RF_CONFIGS 10
+#define NCI_MAX_NUM_CONN 10
+#define NCI_MAX_PARAM_LEN 251
+
+/* NCI Status Codes */
+#define NCI_STATUS_OK 0x00
+#define NCI_STATUS_REJECTED 0x01
+#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02
+#define NCI_STATUS_FAILED 0x03
+#define NCI_STATUS_NOT_INITIALIZED 0x04
+#define NCI_STATUS_SYNTAX_ERROR 0x05
+#define NCI_STATUS_SEMANTIC_ERROR 0x06
+#define NCI_STATUS_UNKNOWN_GID 0x07
+#define NCI_STATUS_UNKNOWN_OID 0x08
+#define NCI_STATUS_INVALID_PARAM 0x09
+#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a
+/* Discovery Specific Status Codes */
+#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
+#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
+#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2
+/* RF Interface Specific Status Codes */
+#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
+#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
+#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
+/* NFCEE Interface Specific Status Codes */
+#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0
+#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1
+#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2
+#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3
+
+/* NCI RF Technology and Mode */
+#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
+#define NCI_NFC_B_PASSIVE_POLL_MODE 0x01
+#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
+#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
+#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
+#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06
+#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
+#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
+#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
+#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
+#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
+#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86
+
+/* NCI RF Technologies */
+#define NCI_NFC_RF_TECHNOLOGY_A 0x00
+#define NCI_NFC_RF_TECHNOLOGY_B 0x01
+#define NCI_NFC_RF_TECHNOLOGY_F 0x02
+#define NCI_NFC_RF_TECHNOLOGY_15693 0x03
+
+/* NCI Bit Rates */
+#define NCI_NFC_BIT_RATE_106 0x00
+#define NCI_NFC_BIT_RATE_212 0x01
+#define NCI_NFC_BIT_RATE_424 0x02
+#define NCI_NFC_BIT_RATE_848 0x03
+#define NCI_NFC_BIT_RATE_1695 0x04
+#define NCI_NFC_BIT_RATE_3390 0x05
+#define NCI_NFC_BIT_RATE_6780 0x06
+
+/* NCI RF Protocols */
+#define NCI_RF_PROTOCOL_UNKNOWN 0x00
+#define NCI_RF_PROTOCOL_T1T 0x01
+#define NCI_RF_PROTOCOL_T2T 0x02
+#define NCI_RF_PROTOCOL_T3T 0x03
+#define NCI_RF_PROTOCOL_ISO_DEP 0x04
+#define NCI_RF_PROTOCOL_NFC_DEP 0x05
+
+/* NCI RF Interfaces */
+#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
+#define NCI_RF_INTERFACE_FRAME 0x01
+#define NCI_RF_INTERFACE_ISO_DEP 0x02
+#define NCI_RF_INTERFACE_NFC_DEP 0x03
+
+/* NCI Configuration Parameter Tags */
+#define NCI_PN_ATR_REQ_GEN_BYTES 0x29
+
+/* NCI Reset types */
+#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
+#define NCI_RESET_TYPE_RESET_CONFIG 0x01
+
+/* NCI Static RF connection ID */
+#define NCI_STATIC_RF_CONN_ID 0x00
+
+/* NCI Data Flow Control */
+#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff
+
+/* NCI RF_DISCOVER_MAP_CMD modes */
+#define NCI_DISC_MAP_MODE_POLL 0x01
+#define NCI_DISC_MAP_MODE_LISTEN 0x02
+
+/* NCI Discover Notification Type */
+#define NCI_DISCOVER_NTF_TYPE_LAST 0x00
+#define NCI_DISCOVER_NTF_TYPE_LAST_NFCC 0x01
+#define NCI_DISCOVER_NTF_TYPE_MORE 0x02
+
+/* NCI Deactivation Type */
+#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
+#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
+#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
+#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03
+
+/* Message Type (MT) */
+#define NCI_MT_DATA_PKT 0x00
+#define NCI_MT_CMD_PKT 0x01
+#define NCI_MT_RSP_PKT 0x02
+#define NCI_MT_NTF_PKT 0x03
+
+#define nci_mt(hdr) (((hdr)[0]>>5)&0x07)
+#define nci_mt_set(hdr, mt) ((hdr)[0] |= (__u8)(((mt)&0x07)<<5))
+
+/* Packet Boundary Flag (PBF) */
+#define NCI_PBF_LAST 0x00
+#define NCI_PBF_CONT 0x01
+
+#define nci_pbf(hdr) (__u8)(((hdr)[0]>>4)&0x01)
+#define nci_pbf_set(hdr, pbf) ((hdr)[0] |= (__u8)(((pbf)&0x01)<<4))
+
+/* Control Opcode manipulation */
+#define nci_opcode_pack(gid, oid) (__u16)((((__u16)((gid)&0x0f))<<8)|\
+ ((__u16)((oid)&0x3f)))
+#define nci_opcode(hdr) nci_opcode_pack(hdr[0], hdr[1])
+#define nci_opcode_gid(op) (__u8)(((op)&0x0f00)>>8)
+#define nci_opcode_oid(op) (__u8)((op)&0x003f)
+
+/* Payload Length */
+#define nci_plen(hdr) (__u8)((hdr)[2])
+
+/* Connection ID */
+#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
+
+/* GID values */
+#define NCI_GID_CORE 0x0
+#define NCI_GID_RF_MGMT 0x1
+#define NCI_GID_NFCEE_MGMT 0x2
+#define NCI_GID_PROPRIETARY 0xf
+
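To make the packing concrete: the opcode is just the GID in the high byte and the OID in the low byte, so the macros above compose and decompose it with shifts and masks. The values below follow directly from those definitions:

        /* nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) == 0x0103 */
        __u16 op = nci_opcode_pack(NCI_GID_RF_MGMT, 0x03);

        /* nci_opcode_gid(op) == 0x01, nci_opcode_oid(op) == 0x03 */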
+/* ----- NCI over SPI head/crc(tail) room needed for outgoing frames ----- */
+#define NCI_SPI_HDR_LEN 4
+#define NCI_SPI_CRC_LEN 2
+
+/* ---- NCI Packet structures ---- */
+#define NCI_CTRL_HDR_SIZE 3
+#define NCI_DATA_HDR_SIZE 3
+
+struct nci_ctrl_hdr {
+ __u8 gid; /* MT & PBF & GID */
+ __u8 oid;
+ __u8 plen;
+} __packed;
+
+struct nci_data_hdr {
+ __u8 conn_id; /* MT & PBF & ConnID */
+ __u8 rfu;
+ __u8 plen;
+} __packed;
+
+/* ------------------------ */
+/* ----- NCI Commands ---- */
+/* ------------------------ */
+#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_cmd {
+ __u8 reset_type;
+} __packed;
+
+#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
+
+#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct set_config_param {
+ __u8 id;
+ __u8 len;
+ __u8 val[NCI_MAX_PARAM_LEN];
+} __packed;
+
+struct nci_core_set_config_cmd {
+ __u8 num_params;
+ struct set_config_param param; /* supporting 1 param per cmd is enough */
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
+struct disc_map_config {
+ __u8 rf_protocol;
+ __u8 mode;
+ __u8 rf_interface;
+} __packed;
+
+struct nci_rf_disc_map_cmd {
+ __u8 num_mapping_configs;
+ struct disc_map_config mapping_configs
+ [NCI_MAX_NUM_MAPPING_CONFIGS];
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+struct disc_config {
+ __u8 rf_tech_and_mode;
+ __u8 frequency;
+} __packed;
+
+struct nci_rf_disc_cmd {
+ __u8 num_disc_configs;
+ struct disc_config disc_configs[NCI_MAX_NUM_RF_CONFIGS];
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_SELECT_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x04)
+struct nci_rf_discover_select_cmd {
+ __u8 rf_discovery_id;
+ __u8 rf_protocol;
+ __u8 rf_interface;
+} __packed;
+
+#define NCI_OP_RF_DEACTIVATE_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_cmd {
+ __u8 type;
+} __packed;
+
+/* ----------------------- */
+/* ---- NCI Responses ---- */
+/* ----------------------- */
+#define NCI_OP_CORE_RESET_RSP nci_opcode_pack(NCI_GID_CORE, 0x00)
+struct nci_core_reset_rsp {
+ __u8 status;
+ __u8 nci_ver;
+ __u8 config_status;
+} __packed;
+
+#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01)
+struct nci_core_init_rsp_1 {
+ __u8 status;
+ __le32 nfcc_features;
+ __u8 num_supported_rf_interfaces;
+ __u8 supported_rf_interfaces[0]; /* variable size array */
+ /* continued in nci_core_init_rsp_2 */
+} __packed;
+
+struct nci_core_init_rsp_2 {
+ __u8 max_logical_connections;
+ __le16 max_routing_table_size;
+ __u8 max_ctrl_pkt_payload_len;
+ __le16 max_size_for_large_params;
+ __u8 manufact_id;
+ __le32 manufact_specific_info;
+} __packed;
+
+#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct nci_core_set_config_rsp {
+ __u8 status;
+ __u8 num_params;
+ __u8 params_id[0]; /* variable size array */
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
+
+#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+
+#define NCI_OP_RF_DISCOVER_SELECT_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x04)
+
+#define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+
+/* --------------------------- */
+/* ---- NCI Notifications ---- */
+/* --------------------------- */
+#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06)
+struct conn_credit_entry {
+ __u8 conn_id;
+ __u8 credits;
+} __packed;
+
+struct nci_core_conn_credit_ntf {
+ __u8 num_entries;
+ struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN];
+} __packed;
+
+#define NCI_OP_CORE_GENERIC_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x07)
+
+#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
+struct nci_core_intf_error_ntf {
+ __u8 status;
+ __u8 conn_id;
+} __packed;
+
+#define NCI_OP_RF_DISCOVER_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
+struct rf_tech_specific_params_nfca_poll {
+ __u16 sens_res;
+ __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
+ __u8 nfcid1[10];
+ __u8 sel_res_len; /* 0 or 1 Bytes */
+ __u8 sel_res;
+} __packed;
+
+struct rf_tech_specific_params_nfcb_poll {
+ __u8 sensb_res_len;
+ __u8 sensb_res[12]; /* 11 or 12 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcf_poll {
+ __u8 bit_rate;
+ __u8 sensf_res_len;
+ __u8 sensf_res[18]; /* 16 or 18 Bytes */
+} __packed;
+
+struct nci_rf_discover_ntf {
+ __u8 rf_discovery_id;
+ __u8 rf_protocol;
+ __u8 rf_tech_and_mode;
+ __u8 rf_tech_specific_params_len;
+
+ union {
+ struct rf_tech_specific_params_nfca_poll nfca_poll;
+ struct rf_tech_specific_params_nfcb_poll nfcb_poll;
+ struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ } rf_tech_specific_params;
+
+ __u8 ntf_type;
+} __packed;
+
+#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
+struct activation_params_nfca_poll_iso_dep {
+ __u8 rats_res_len;
+ __u8 rats_res[20];
+};
+
+struct activation_params_nfcb_poll_iso_dep {
+ __u8 attrib_res_len;
+ __u8 attrib_res[50];
+};
+
+struct activation_params_poll_nfc_dep {
+ __u8 atr_res_len;
+ __u8 atr_res[63];
+};
+
+struct nci_rf_intf_activated_ntf {
+ __u8 rf_discovery_id;
+ __u8 rf_interface;
+ __u8 rf_protocol;
+ __u8 activation_rf_tech_and_mode;
+ __u8 max_data_pkt_payload_size;
+ __u8 initial_num_credits;
+ __u8 rf_tech_specific_params_len;
+
+ union {
+ struct rf_tech_specific_params_nfca_poll nfca_poll;
+ struct rf_tech_specific_params_nfcb_poll nfcb_poll;
+ struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ } rf_tech_specific_params;
+
+ __u8 data_exch_rf_tech_and_mode;
+ __u8 data_exch_tx_bit_rate;
+ __u8 data_exch_rx_bit_rate;
+ __u8 activation_params_len;
+
+ union {
+ struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
+ struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
+ struct activation_params_poll_nfc_dep poll_nfc_dep;
+ } activation_params;
+
+} __packed;
+
+#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_ntf {
+ __u8 type;
+ __u8 reason;
+} __packed;
+
+#endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
new file mode 100644
index 00000000000..1f9a0f5272f
--- /dev/null
+++ b/include/net/nfc/nci_core.h
@@ -0,0 +1,232 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.h, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __NCI_CORE_H
+#define __NCI_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/nci.h>
+
+/* NCI device flags */
+enum nci_flag {
+ NCI_INIT,
+ NCI_UP,
+ NCI_DATA_EXCHANGE,
+ NCI_DATA_EXCHANGE_TO,
+};
+
+/* NCI device states */
+enum nci_state {
+ NCI_IDLE,
+ NCI_DISCOVERY,
+ NCI_W4_ALL_DISCOVERIES,
+ NCI_W4_HOST_SELECT,
+ NCI_POLL_ACTIVE,
+};
+
+/* NCI timeouts */
+#define NCI_RESET_TIMEOUT 5000
+#define NCI_INIT_TIMEOUT 5000
+#define NCI_SET_CONFIG_TIMEOUT 5000
+#define NCI_RF_DISC_TIMEOUT 5000
+#define NCI_RF_DISC_SELECT_TIMEOUT 5000
+#define NCI_RF_DEACTIVATE_TIMEOUT 30000
+#define NCI_CMD_TIMEOUT 5000
+#define NCI_DATA_TIMEOUT 700
+
+struct nci_dev;
+
+struct nci_ops {
+ int (*open)(struct nci_dev *ndev);
+ int (*close)(struct nci_dev *ndev);
+ int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
+ int (*setup)(struct nci_dev *ndev);
+};
+
+#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
+#define NCI_MAX_DISCOVERED_TARGETS 10
+
+/* NCI Core structures */
+struct nci_dev {
+ struct nfc_dev *nfc_dev;
+ struct nci_ops *ops;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ atomic_t state;
+ unsigned long flags;
+
+ atomic_t cmd_cnt;
+ atomic_t credits_cnt;
+
+ struct timer_list cmd_timer;
+ struct timer_list data_timer;
+
+ struct workqueue_struct *cmd_wq;
+ struct work_struct cmd_work;
+
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_work;
+
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+
+ struct sk_buff_head cmd_q;
+ struct sk_buff_head rx_q;
+ struct sk_buff_head tx_q;
+
+ struct mutex req_lock;
+ struct completion req_completion;
+ __u32 req_status;
+ __u32 req_result;
+
+ void *driver_data;
+
+ __u32 poll_prots;
+ __u32 target_active_prot;
+
+ struct nfc_target targets[NCI_MAX_DISCOVERED_TARGETS];
+ int n_targets;
+
+ /* received during NCI_OP_CORE_RESET_RSP */
+ __u8 nci_ver;
+
+ /* received during NCI_OP_CORE_INIT_RSP */
+ __u32 nfcc_features;
+ __u8 num_supported_rf_interfaces;
+ __u8 supported_rf_interfaces
+ [NCI_MAX_SUPPORTED_RF_INTERFACES];
+ __u8 max_logical_connections;
+ __u16 max_routing_table_size;
+ __u8 max_ctrl_pkt_payload_len;
+ __u16 max_size_for_large_params;
+ __u8 manufact_id;
+ __u32 manufact_specific_info;
+
+ /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
+ __u8 max_data_pkt_payload_size;
+ __u8 initial_num_credits;
+
+ /* stored during nci_data_exchange */
+ data_exchange_cb_t data_exchange_cb;
+ void *data_exchange_cb_context;
+ struct sk_buff *rx_data_reassembly;
+
+ /* stored during intf_activated_ntf */
+ __u8 remote_gb[NFC_MAX_GT_LEN];
+ __u8 remote_gb_len;
+};
+
+/* ----- NCI Devices ----- */
+struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
+void nci_free_device(struct nci_dev *ndev);
+int nci_register_device(struct nci_dev *ndev);
+void nci_unregister_device(struct nci_dev *ndev);
+int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
+
+static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
+ unsigned int len,
+ gfp_t how)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + ndev->tx_headroom + ndev->tx_tailroom, how);
+ if (skb)
+ skb_reserve(skb, ndev->tx_headroom);
+
+ return skb;
+}
+
+static inline void nci_set_parent_dev(struct nci_dev *ndev, struct device *dev)
+{
+ nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nci_set_drvdata(struct nci_dev *ndev, void *data)
+{
+ ndev->driver_data = data;
+}
+
+static inline void *nci_get_drvdata(struct nci_dev *ndev)
+{
+ return ndev->driver_data;
+}
+
+void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
+void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
+void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
+int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
+void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ int err);
+void nci_clear_target_list(struct nci_dev *ndev);
+
+/* ----- NCI requests ----- */
+#define NCI_REQ_DONE 0
+#define NCI_REQ_PEND 1
+#define NCI_REQ_CANCELED 2
+
+void nci_req_complete(struct nci_dev *ndev, int result);
+
+/* ----- NCI status code ----- */
+int nci_to_errno(__u8 code);
+
+/* ----- NCI over SPI acknowledge modes ----- */
+#define NCI_SPI_CRC_DISABLED 0x00
+#define NCI_SPI_CRC_ENABLED 0x01
+
+/* ----- NCI SPI structures ----- */
+struct nci_spi {
+ struct nci_dev *ndev;
+ struct spi_device *spi;
+
+ unsigned int xfer_udelay; /* microseconds delay between
+ transactions */
+ u8 acknowledge_mode;
+
+ struct completion req_completion;
+ u8 req_result;
+};
+
+/* ----- NCI SPI ----- */
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+ u8 acknowledge_mode, unsigned int delay,
+ struct nci_dev *ndev);
+int nci_spi_send(struct nci_spi *nspi,
+ struct completion *write_handshake_completion,
+ struct sk_buff *skb);
+struct sk_buff *nci_spi_read(struct nci_spi *nspi);
+
+#endif /* __NCI_CORE_H */
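
A minimal usage sketch for the API above, assuming a hypothetical transport driver; my_nci_open/my_nci_close/my_nci_send, my_probe and the protocol mask are illustrative names, not part of this header. Frames arriving from the controller would be fed back through nci_recv_frame() from the driver's receive path.

#include <net/nfc/nci_core.h>

static int my_nci_open(struct nci_dev *ndev)
{
	/* power up / enable the physical link here */
	return 0;
}

static int my_nci_close(struct nci_dev *ndev)
{
	return 0;
}

static int my_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
	/* hand the NCI frame to the transport (UART, I2C, SPI, ...) */
	consume_skb(skb);
	return 0;
}

static struct nci_ops my_nci_ops = {
	.open	= my_nci_open,
	.close	= my_nci_close,
	.send	= my_nci_send,
};

static int my_probe(struct device *parent)
{
	struct nci_dev *ndev;
	int rc;

	ndev = nci_allocate_device(&my_nci_ops,
				   NFC_PROTO_ISO14443_MASK |
				   NFC_PROTO_NFC_DEP_MASK,
				   0 /* tx_headroom */, 0 /* tx_tailroom */);
	if (!ndev)
		return -ENOMEM;

	nci_set_parent_dev(ndev, parent);
	rc = nci_register_device(ndev);
	if (rc)
		nci_free_device(ndev);
	return rc;
}
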
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
new file mode 100644
index 00000000000..6c583e244de
--- /dev/null
+++ b/include/net/nfc/nfc.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ *
+ * Authors:
+ * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
+ * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NET_NFC_H
+#define __NET_NFC_H
+
+#include <linux/nfc.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+
+#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__)
+#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__)
+
+struct nfc_phy_ops {
+ int (*write)(void *dev_id, struct sk_buff *skb);
+ int (*enable)(void *dev_id);
+ void (*disable)(void *dev_id);
+};
+
+struct nfc_dev;
+
+/**
+ * data_exchange_cb_t - Definition of nfc_data_exchange callback
+ *
+ * @context: nfc_data_exchange cb_context parameter
+ * @skb: response data
+ * @err: If an error has occurred during data exchange, it is the
+ * error number. Zero means no error.
+ *
+ * When an rx or tx packet is lost or corrupted, or the target gets out
+ * of the operating field, err is -EIO.
+ */
+typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
+ int err);
+
+typedef void (*se_io_cb_t)(void *context, u8 *apdu, size_t apdu_len, int err);
+
+struct nfc_target;
+
+struct nfc_ops {
+ int (*dev_up)(struct nfc_dev *dev);
+ int (*dev_down)(struct nfc_dev *dev);
+ int (*start_poll)(struct nfc_dev *dev,
+ u32 im_protocols, u32 tm_protocols);
+ void (*stop_poll)(struct nfc_dev *dev);
+ int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target,
+ u8 comm_mode, u8 *gb, size_t gb_len);
+ int (*dep_link_down)(struct nfc_dev *dev);
+ int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target,
+ u32 protocol);
+ void (*deactivate_target)(struct nfc_dev *dev,
+ struct nfc_target *target);
+ int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context);
+ int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
+ int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
+ int (*fw_download)(struct nfc_dev *dev, const char *firmware_name);
+
+ /* Secure Element API */
+ int (*discover_se)(struct nfc_dev *dev);
+ int (*enable_se)(struct nfc_dev *dev, u32 se_idx);
+ int (*disable_se)(struct nfc_dev *dev, u32 se_idx);
+ int (*se_io) (struct nfc_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+};
+
+#define NFC_TARGET_IDX_ANY -1
+#define NFC_MAX_GT_LEN 48
+#define NFC_ATR_RES_GT_OFFSET 15
+
+/**
+ * struct nfc_target - NFC target description
+ *
+ * @sens_res: 2 bytes describing the target SENS_RES response, if the target
+ * is a type A one. The %sens_res most significant byte must be byte 2
+ * as described by the NFC Forum digital specification (i.e. the platform
+ * configuration one) while %sens_res least significant byte is byte 1.
+ */
+struct nfc_target {
+ u32 idx;
+ u32 supported_protocols;
+ u16 sens_res;
+ u8 sel_res;
+ u8 nfcid1_len;
+ u8 nfcid1[NFC_NFCID1_MAXSIZE];
+ u8 nfcid2_len;
+ u8 nfcid2[NFC_NFCID2_MAXSIZE];
+ u8 sensb_res_len;
+ u8 sensb_res[NFC_SENSB_RES_MAXSIZE];
+ u8 sensf_res_len;
+ u8 sensf_res[NFC_SENSF_RES_MAXSIZE];
+ u8 hci_reader_gate;
+ u8 logical_idx;
+ u8 is_iso15693;
+ u8 iso15693_dsfid;
+ u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE];
+};
+
+/**
+ * nfc_se - A structure for NFC accessible secure elements.
+ *
+ * @idx: The secure element index. User space will enable or
+ * disable a secure element by its index.
+ * @type: The secure element type. It can be SE_UICC or
+ * SE_EMBEDDED.
+ * @state: The secure element state, either enabled or disabled.
+ *
+ */
+struct nfc_se {
+ struct list_head list;
+ u32 idx;
+ u16 type;
+ u16 state;
+};
+
+struct nfc_genl_data {
+ u32 poll_req_portid;
+ struct mutex genl_data_mutex;
+};
+
+struct nfc_dev {
+ int idx;
+ u32 target_next_idx;
+ struct nfc_target *targets;
+ int n_targets;
+ int targets_generation;
+ struct device dev;
+ bool dev_up;
+ bool fw_download_in_progress;
+ u8 rf_mode;
+ bool polling;
+ struct nfc_target *active_target;
+ bool dep_link_up;
+ struct nfc_genl_data genl_data;
+ u32 supported_protocols;
+
+ struct list_head secure_elements;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ struct timer_list check_pres_timer;
+ struct work_struct check_pres_work;
+
+ bool shutting_down;
+
+ struct rfkill *rfkill;
+
+ struct nfc_ops *ops;
+};
+#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
+
+extern struct class nfc_class;
+
+struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+ u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom);
+
+/**
+ * nfc_free_device - free nfc device
+ *
+ * @dev: The nfc device to free
+ */
+static inline void nfc_free_device(struct nfc_dev *dev)
+{
+ put_device(&dev->dev);
+}
+
+int nfc_register_device(struct nfc_dev *dev);
+
+void nfc_unregister_device(struct nfc_dev *dev);
+
+/**
+ * nfc_set_parent_dev - set the parent device
+ *
+ * @nfc_dev: The nfc device whose parent is being set
+ * @dev: The parent device
+ */
+static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev,
+ struct device *dev)
+{
+ nfc_dev->dev.parent = dev;
+}
+
+/**
+ * nfc_set_drvdata - set driver specific data
+ *
+ * @dev: The nfc device
+ * @data: Pointer to driver specific data
+ */
+static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+/**
+ * nfc_get_drvdata - get driver specific data
+ *
+ * @dev: The nfc device
+ */
+static inline void *nfc_get_drvdata(struct nfc_dev *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+/**
+ * nfc_device_name - get the nfc device name
+ *
+ * @dev: The nfc device whose name to return
+ */
+static inline const char *nfc_device_name(struct nfc_dev *dev)
+{
+ return dev_name(&dev->dev);
+}
+
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+ unsigned int flags, unsigned int size,
+ unsigned int *err);
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
+
+int nfc_set_remote_general_bytes(struct nfc_dev *dev,
+ u8 *gt, u8 gt_len);
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
+
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result);
+
+int nfc_targets_found(struct nfc_dev *dev,
+ struct nfc_target *targets, int ntargets);
+int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
+
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode);
+
+int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
+ u8 *gb, size_t gb_len);
+int nfc_tm_deactivated(struct nfc_dev *dev);
+int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
+
+void nfc_driver_failure(struct nfc_dev *dev, int err);
+
+int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
+int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
+
+void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
+ u8 payload_type, u8 direction);
+
+#endif /* __NET_NFC_H */
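
A sketch of the allocate/set-parent/register life cycle a driver built directly on struct nfc_ops would follow; the I2C client, my_nfc_ops and priv are assumptions for illustration, not defined by this header.

#include <linux/i2c.h>
#include <net/nfc/nfc.h>

static int my_register(struct i2c_client *client, struct nfc_ops *my_nfc_ops,
		       void *priv)
{
	struct nfc_dev *ndev;
	int r;

	ndev = nfc_allocate_device(my_nfc_ops, NFC_PROTO_NFC_DEP_MASK,
				   4 /* tx_headroom */, 2 /* tx_tailroom */);
	if (!ndev)
		return -ENOMEM;

	nfc_set_parent_dev(ndev, &client->dev);
	nfc_set_drvdata(ndev, priv);

	r = nfc_register_device(ndev);
	if (r < 0) {
		nfc_free_device(ndev);
		return r;
	}
	return 0;
}
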
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 99d2ba1c7e0..b23548e0409 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -52,7 +52,7 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
* Note: This is in section 7.3.2 of the IEEE 802.15.4 document.
*/
int ieee802154_nl_assoc_confirm(struct net_device *dev,
- u16 short_addr, u8 status);
+ __le16 short_addr, u8 status);
/**
* ieee802154_nl_disassoc_indic - Notify userland of disassociation.
@@ -111,8 +111,8 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
* Note: This API cannot indicate a beacon frame for a coordinator
* operating in long addressing mode.
*/
-int ieee802154_nl_beacon_indic(struct net_device *dev, u16 panid,
- u16 coord_addr);
+int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid,
+ __le16 coord_addr);
/**
* ieee802154_nl_start_confirm - Notify userland of completion of start.
diff --git a/include/net/p8022.h b/include/net/p8022.h
index 42e9fac51b3..05e41383856 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -1,13 +1,13 @@
#ifndef _NET_P8022_H
#define _NET_P8022_H
-extern struct datalink_proto *
- register_8022_client(unsigned char type,
- int (*func)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev));
-extern void unregister_8022_client(struct datalink_proto *proto);
+struct datalink_proto *
+register_8022_client(unsigned char type,
+ int (*func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev));
+void unregister_8022_client(struct datalink_proto *proto);
-extern struct datalink_proto *make_8023_client(void);
-extern void destroy_8023_client(struct datalink_proto *dl);
+struct datalink_proto *make_8023_client(void);
+void destroy_8023_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/phonet/gprs.h b/include/net/phonet/gprs.h
index 928daf595be..bcd525e39a0 100644
--- a/include/net/phonet/gprs.h
+++ b/include/net/phonet/gprs.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2008 Nokia Corporation.
*
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b60b28c99e8..b669fe6dbc3 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -28,7 +28,6 @@ struct pep_sock {
/* XXX: union-ify listening vs connected stuff ? */
/* Listening socket stuff: */
- struct hlist_head ackq;
struct hlist_head hlist;
/* Connected socket stuff: */
@@ -45,10 +44,6 @@ struct pep_sock {
u8 tx_fc; /* TX flow control */
u8 init_enable; /* auto-enable at creation */
u8 aligned;
-#ifdef CONFIG_PHONET_PIPECTRLR
- u8 pipe_state;
- struct sockaddr_pn remote_pep;
-#endif
};
static inline struct pep_sock *pep_sk(struct sock *sk)
@@ -158,6 +153,7 @@ enum {
PN_LEGACY_FLOW_CONTROL,
PN_ONE_CREDIT_FLOW_CONTROL,
PN_MULTI_CREDIT_FLOW_CONTROL,
+ PN_MAX_FLOW_CONTROL,
};
#define pn_flow_safe(fc) ((fc) >> 1)
@@ -169,21 +165,4 @@ enum {
PEP_IND_READY,
};
-#ifdef CONFIG_PHONET_PIPECTRLR
-#define PNS_PEP_CONNECT_UTID 0x02
-#define PNS_PIPE_CREATED_IND_UTID 0x04
-#define PNS_PIPE_ENABLE_UTID 0x0A
-#define PNS_PIPE_ENABLED_IND_UTID 0x0C
-#define PNS_PIPE_DISABLE_UTID 0x0F
-#define PNS_PIPE_DISABLED_IND_UTID 0x11
-#define PNS_PEP_DISCONNECT_UTID 0x06
-
-/* Used for tracking state of a pipe */
-enum {
- PIPE_IDLE,
- PIPE_DISABLED,
- PIPE_ENABLED,
-};
-#endif /* CONFIG_PHONET_PIPECTRLR */
-
#endif
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index d5df797f954..68e509750ca 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -36,6 +36,7 @@
struct pn_sock {
struct sock sk;
u16 sobject;
+ u16 dobject;
u8 resource;
};
@@ -107,8 +108,8 @@ struct phonet_protocol {
int sock_type;
};
-int phonet_proto_register(int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
int phonet_sysctl_init(void);
void phonet_sysctl_exit(void);
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 13649eb5741..8639de5750f 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -51,7 +51,7 @@ void phonet_address_notify(int event, struct net_device *dev, u8 addr);
int phonet_route_add(struct net_device *dev, u8 daddr);
int phonet_route_del(struct net_device *dev, u8 daddr);
void rtm_phonet_notify(int event, struct net_device *dev, u8 dst);
-struct net_device *phonet_route_get(struct net *net, u8 daddr);
+struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr);
struct net_device *phonet_route_output(struct net *net, u8 daddr);
#define PN_NO_ADDR 0xff
diff --git a/include/net/ping.h b/include/net/ping.h
new file mode 100644
index 00000000000..026479b61a2
--- /dev/null
+++ b/include/net/ping.h
@@ -0,0 +1,111 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the "ping" module.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _PING_H
+#define _PING_H
+
+#include <net/icmp.h>
+#include <net/netns/hash.h>
+
+/* PING_HTABLE_SIZE must be a power of 2 */
+#define PING_HTABLE_SIZE 64
+#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
+
+#define ping_portaddr_for_each_entry(__sk, node, list) \
+ hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+
+/*
+ * gid_t is either uint or ushort. We want to pass it to
+ * proc_dointvec_minmax(), so it must not be larger than INT_MAX
+ */
+#define GID_T_MAX (((gid_t)~0U) >> 1)
+
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+struct pingv6_ops {
+ int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+ void (*ip6_datagram_recv_common_ctl)(struct sock *sk,
+ struct msghdr *msg,
+ struct sk_buff *skb);
+ void (*ip6_datagram_recv_specific_ctl)(struct sock *sk,
+ struct msghdr *msg,
+ struct sk_buff *skb);
+ int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+ void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
+ int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
+};
+
+struct ping_iter_state {
+ struct seq_net_private p;
+ int bucket;
+ sa_family_t family;
+};
+
+extern struct proto ping_prot;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct pingv6_ops pingv6_ops;
+#endif
+
+struct pingfakehdr {
+ struct icmphdr icmph;
+ struct iovec *iov;
+ sa_family_t family;
+ __wsum wcheck;
+};
+
+int ping_get_port(struct sock *sk, unsigned short ident);
+void ping_hash(struct sock *sk);
+void ping_unhash(struct sock *sk);
+
+int ping_init_sock(struct sock *sk);
+void ping_close(struct sock *sk, long timeout);
+int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void ping_err(struct sk_buff *skb, int offset, u32 info);
+int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
+ struct sk_buff *);
+
+int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len);
+int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+ void *user_icmph, size_t icmph_len);
+int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len);
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void ping_rcv(struct sk_buff *skb);
+
+#ifdef CONFIG_PROC_FS
+struct ping_seq_afinfo {
+ char *name;
+ sa_family_t family;
+ const struct file_operations *seq_fops;
+ const struct seq_operations seq_ops;
+};
+
+extern const struct file_operations ping_seq_fops;
+
+void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
+void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void ping_seq_stop(struct seq_file *seq, void *v);
+int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
+void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
+
+int __init ping_proc_init(void);
+void ping_proc_exit(void);
+#endif
+
+void __init ping_init(void);
+int __init pingv6_init(void);
+void pingv6_exit(void);
+
+#endif /* _PING_H */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index dd3031aed9d..6da46dcf104 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -14,8 +14,8 @@ struct tcf_walker {
int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};
-extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
-extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+int register_tcf_proto_ops(struct tcf_proto_ops *ops);
+int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
@@ -62,18 +62,26 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
- struct tc_action *action;
+ __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
+ struct list_head actions;
#endif
-};
-
-/* Map to export classifier specific extension TLV types to the
- * generic extensions API. Unsupported extensions must be set to 0.
- */
-struct tcf_ext_map {
+ /* Map to export classifier specific extension TLV types to the
+ * generic extensions API. Unsupported extensions must be set to 0.
+ */
int action;
int police;
};
+static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ exts->type = 0;
+ INIT_LIST_HEAD(&exts->actions);
+#endif
+ exts->action = action;
+ exts->police = police;
+}
+
/**
* tcf_exts_is_predicative - check if a predicative extension is present
* @exts: tc filter extensions handle
@@ -85,7 +93,7 @@ static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- return !!exts->action;
+ return !list_empty(&exts->actions);
#else
return 0;
#endif
@@ -120,22 +128,20 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
- if (exts->action)
- return tcf_action_exec(skb, exts->action, res);
+ if (!list_empty(&exts->actions))
+ return tcf_action_exec(skb, &exts->actions, res);
#endif
return 0;
}
-extern int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
- struct nlattr *rate_tlv, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
-extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
- struct tcf_exts *src);
-extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
+ struct nlattr **tb, struct nlattr *rate_tlv,
+ struct tcf_exts *exts, bool ovr);
+void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
+ struct tcf_exts *src);
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
* struct tcf_pkt_info - packet information
@@ -238,14 +244,14 @@ struct tcf_ematch_ops {
struct list_head link;
};
-extern int tcf_em_register(struct tcf_ematch_ops *);
-extern void tcf_em_unregister(struct tcf_ematch_ops *);
-extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
- struct tcf_ematch_tree *);
-extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
-extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
-extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
- struct tcf_pkt_info *);
+int tcf_em_register(struct tcf_ematch_ops *);
+void tcf_em_unregister(struct tcf_ematch_ops *);
+int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
+ struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
+int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
+ struct tcf_pkt_info *);
/**
* tcf_em_tree_change - replace ematch tree of a running classifier
@@ -323,34 +329,36 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
static inline int tcf_valid_offset(const struct sk_buff *skb,
const unsigned char *ptr, const int len)
{
- return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head);
+ return likely((ptr + len) <= skb_tail_pointer(skb) &&
+ ptr >= skb->head &&
+ (ptr <= (ptr + len)));
}
#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>
static inline int
-tcf_change_indev(struct tcf_proto *tp, char *indev, struct nlattr *indev_tlv)
+tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
+ char indev[IFNAMSIZ];
+ struct net_device *dev;
+
if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
return -EINVAL;
- return 0;
+ dev = __dev_get_by_name(net, indev);
+ if (!dev)
+ return -ENODEV;
+ return dev->ifindex;
}
-static inline int
-tcf_match_indev(struct sk_buff *skb, char *indev)
+static inline bool
+tcf_match_indev(struct sk_buff *skb, int ifindex)
{
- struct net_device *dev;
-
- if (indev[0]) {
- if (!skb->skb_iif)
- return 0;
- dev = __dev_get_by_index(dev_net(skb->dev), skb->skb_iif);
- if (!dev || strcmp(indev, dev->name))
- return 0;
- }
-
- return 1;
+ if (!ifindex)
+ return true;
+ if (!skb->skb_iif)
+ return false;
+ return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
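
A rough sketch of the validate-then-commit pattern a classifier's ->change() handler follows with the list-based tcf_exts above; struct example_filter and the TCA_EXAMPLE_* attribute ids are placeholders, not from this header.

#include <net/pkt_cls.h>

enum {
	TCA_EXAMPLE_UNSPEC,
	TCA_EXAMPLE_ACT,
	TCA_EXAMPLE_POLICE,
	__TCA_EXAMPLE_MAX,
};

struct example_filter {
	struct tcf_exts exts;
	/* classifier specific match data would live here */
};

static int example_set_parms(struct net *net, struct tcf_proto *tp,
			     struct example_filter *f, struct nlattr **tb,
			     struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	/* ... parse classifier specific attributes from tb[] ... */

	tcf_exts_change(tp, &f->exts, &e);	/* swap in the new action list */
	return 0;
}
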
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d9549af6929..ec030cd7661 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -32,7 +32,7 @@ static inline void *qdisc_priv(struct Qdisc *q)
The result: [34]86 is not good choice for QoS router :-(
- The things are not so bad, because we may use artifical
+ The things are not so bad, because we may use artificial
clock evaluated by integration of network data flow
in the most critical places.
*/
@@ -64,34 +64,44 @@ struct qdisc_watchdog {
struct Qdisc *qdisc;
};
-extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
- psched_time_t expires);
-extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+
+static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
+ psched_time_t expires)
+{
+ qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+}
+
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
-extern int fifo_set_limit(struct Qdisc *q, unsigned int limit);
-extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
- unsigned int limit);
-
-extern int register_qdisc(struct Qdisc_ops *qops);
-extern int unregister_qdisc(struct Qdisc_ops *qops);
-extern void qdisc_list_del(struct Qdisc *q);
-extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
-extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
- struct nlattr *tab);
-extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
-extern void qdisc_put_stab(struct qdisc_size_table *tab);
-extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
-extern int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock);
-
-extern void __qdisc_run(struct Qdisc *q);
+int fifo_set_limit(struct Qdisc *q, unsigned int limit);
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+ unsigned int limit);
+
+int register_qdisc(struct Qdisc_ops *qops);
+int unregister_qdisc(struct Qdisc_ops *qops);
+void qdisc_get_default(char *id, size_t len);
+int qdisc_set_default(const char *id);
+
+void qdisc_list_add(struct Qdisc *q);
+void qdisc_list_del(struct Qdisc *q);
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab);
+void qdisc_put_rtab(struct qdisc_rate_table *tab);
+void qdisc_put_stab(struct qdisc_size_table *tab);
+void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock);
+
+void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
@@ -99,15 +109,15 @@ static inline void qdisc_run(struct Qdisc *q)
__qdisc_run(q);
}
-extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
- struct tcf_result *res);
-extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
+int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res);
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res);
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
-static inline unsigned psched_mtu(const struct net_device *dev)
+static inline unsigned int psched_mtu(const struct net_device *dev)
{
return dev->mtu + dev->hard_header_len;
}
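
The watchdog now takes nanosecond deadlines directly, with the old psched-tick entry point kept as the inline wrapper above. A shaping qdisc's dequeue path would typically arm it as sketched below; struct my_sched_data and next_tx_ns are assumptions, not part of this header.

#include <net/pkt_sched.h>

struct my_sched_data {
	struct qdisc_watchdog watchdog;
	u64 next_tx_ns;		/* absolute time the next packet may leave */
};

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_to_ns(ktime_get());

	if (now < q->next_tx_ns) {
		/* not allowed to send yet: arm the hrtimer and back off */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_ns);
		return NULL;
	}
	/* ... dequeue and return the head packet here ... */
	return NULL;
}
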
diff --git a/include/net/protocol.h b/include/net/protocol.h
index dc07495bce4..d6fcc1fcdb5 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -25,52 +25,56 @@
#define _PROTOCOL_H
#include <linux/in6.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#include <linux/skbuff.h>
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif
+#include <linux/netdevice.h>
-#define MAX_INET_PROTOS 256 /* Must be a power of 2 */
-
+/* This is one larger than the largest protocol value that can be
+ * found in an ipv4 or ipv6 header. Since in both cases the protocol
+ * value is presented in a __u8, this is defined to be 256.
+ */
+#define MAX_INET_PROTOS 256
/* This is used to register protocols. */
struct net_protocol {
+ void (*early_demux)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
- int (*gso_send_check)(struct sk_buff *skb);
- struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
- int (*gro_complete)(struct sk_buff *skb);
unsigned int no_policy:1,
- netns_ok:1;
+ netns_ok:1,
+ /* does the protocol do more stringent
+ * icmp tag validation than simple
+ * socket lookup?
+ */
+ icmp_strict_tag_validation:1;
};
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
+ void (*early_demux)(struct sk_buff *skb);
+
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset,
__be32 info);
-
- int (*gso_send_check)(struct sk_buff *skb);
- struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
- int (*gro_complete)(struct sk_buff *skb);
-
unsigned int flags; /* INET6_PROTO_xxx */
};
#define INET6_PROTO_NOPOLICY 0x1
#define INET6_PROTO_FINAL 0x2
-/* This should be set for any extension header which is compatible with GSO. */
-#define INET6_PROTO_GSO_EXTHDR 0x4
#endif
+struct net_offload {
+ struct offload_callbacks callbacks;
+ unsigned int flags; /* Flags used by IPv6 for now */
+};
+/* This should be set for any extension header which is compatible with GSO. */
+#define INET6_PROTO_GSO_EXTHDR 0x1
+
/* This is used to register socket interfaces for IP protocols. */
struct inet_protosw {
struct list_head list;
@@ -82,7 +86,6 @@ struct inet_protosw {
struct proto *prot;
const struct proto_ops *ops;
- char no_check; /* checksum on rcv/xmit/none? */
unsigned char flags; /* See INET_PROTOSW_* below. */
};
#define INET_PROTOSW_REUSE 0x01 /* Are ports automatically reusable? */
@@ -90,21 +93,30 @@ struct inet_protosw {
#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
+extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS];
+extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
-extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
-extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
-extern void inet_register_protosw(struct inet_protosw *p);
-extern void inet_unregister_protosw(struct inet_protosw *p);
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int inet6_register_protosw(struct inet_protosw *p);
-extern void inet6_unregister_protosw(struct inet_protosw *p);
+int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_add_offload(const struct net_offload *prot, unsigned char num);
+int inet_del_offload(const struct net_offload *prot, unsigned char num);
+void inet_register_protosw(struct inet_protosw *p);
+void inet_unregister_protosw(struct inet_protosw *p);
+
+int udp_add_offload(struct udp_offload *prot);
+void udp_del_offload(struct udp_offload *prot);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_register_protosw(struct inet_protosw *p);
+void inet6_unregister_protosw(struct inet_protosw *p);
#endif
+int inet6_add_offload(const struct net_offload *prot, unsigned char num);
+int inet6_del_offload(const struct net_offload *prot, unsigned char num);
#endif /* _PROTOCOL_H */
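
A sketch of how an L4 module registers with the protocol table now that the GSO/GRO callbacks live in struct net_offload; the handler bodies and the experimental protocol number 253 (RFC 3692) are illustrative.

#include <linux/init.h>
#include <net/protocol.h>

static int foo_rcv(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static void foo_err(struct sk_buff *skb, u32 info)
{
}

static const struct net_protocol foo_protocol = {
	.handler	= foo_rcv,
	.err_handler	= foo_err,
	.no_policy	= 1,
	.netns_ok	= 1,
};

static int __init foo_init(void)
{
	/* 253 is reserved for experimentation */
	return inet_add_protocol(&foo_protocol, 253);
}
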
diff --git a/include/net/psnap.h b/include/net/psnap.h
index fe456c295b0..78db4cc1306 100644
--- a/include/net/psnap.h
+++ b/include/net/psnap.h
@@ -1,11 +1,11 @@
#ifndef _NET_PSNAP_H
#define _NET_PSNAP_H
-extern struct datalink_proto *
+struct datalink_proto *
register_snap_client(const unsigned char *desc,
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *,
struct net_device *orig_dev));
-extern void unregister_snap_client(struct datalink_proto *proto);
+void unregister_snap_client(struct datalink_proto *proto);
#endif
diff --git a/include/net/raw.h b/include/net/raw.h
index 42ce6fe7a2d..6a40c6562dd 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -26,7 +26,7 @@ extern struct proto raw_prot;
void raw_icmp_error(struct sk_buff *, int, u32);
int raw_local_deliver(struct sk_buff *, int);
-extern int raw_rcv(struct sock *, struct sk_buff *);
+int raw_rcv(struct sock *, struct sk_buff *);
#define RAW_HTABLE_SIZE MAX_INET_PROTOS
@@ -36,8 +36,8 @@ struct raw_hashinfo {
};
#ifdef CONFIG_PROC_FS
-extern int raw_proc_init(void);
-extern void raw_proc_exit(void);
+int raw_proc_init(void);
+void raw_proc_exit(void);
struct raw_iter_state {
struct seq_net_private p;
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index f6b9b830df8..87783dea079 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -1,16 +1,13 @@
#ifndef _NET_RAWV6_H
#define _NET_RAWV6_H
-#ifdef __KERNEL__
-
#include <net/protocol.h>
void raw6_icmp_error(struct sk_buff *, int nexthdr,
u8 type, u8 code, int inner_offset, __be32);
-int raw6_local_deliver(struct sk_buff *, int);
+bool raw6_local_deliver(struct sk_buff *, int);
-extern int rawv6_rcv(struct sock *sk,
- struct sk_buff *skb);
+int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
@@ -20,5 +17,3 @@ int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
#endif
#endif
-
-#endif
diff --git a/include/net/red.h b/include/net/red.h
index 995108e54d9..76e0b5f922c 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -2,9 +2,11 @@
#define __NET_SCHED_RED_H
#include <linux/types.h>
+#include <linux/bug.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
/* Random Early Detection (RED) algorithm.
=======================================
@@ -87,6 +89,29 @@
etc.
*/
+/*
+ * Adaptive RED : An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
+ *
+ * Every 500 ms:
+ * if (avg > target and max_p <= 0.5)
+ * increase max_p : max_p += alpha;
+ * else if (avg < target and max_p >= 0.01)
+ * decrease max_p : max_p *= beta;
+ *
+ * target : [qth_min + 0.4*(qth_max - qth_min),
+ * qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (32-bit mantissa)
+ * max_P between 0.01 and 0.5 (1% - 50%) [ It's no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
#define RED_STAB_SIZE 256
#define RED_STAB_MASK (RED_STAB_SIZE - 1)
@@ -97,87 +122,118 @@ struct red_stats {
u32 forced_mark; /* Forced marks, qavg > max_thresh */
u32 pdrop; /* Drops due to queue limits */
u32 other; /* Drops due to drop() calls */
- u32 backlog;
};
struct red_parms {
/* Parameters */
- u32 qth_min; /* Min avg length threshold: A scaled */
- u32 qth_max; /* Max avg length threshold: A scaled */
+ u32 qth_min; /* Min avg length threshold: Wlog scaled */
+ u32 qth_max; /* Max avg length threshold: Wlog scaled */
u32 Scell_max;
- u32 Rmask; /* Cached random mask, see red_rmask */
+ u32 max_P; /* probability, [0 .. 1.0] 32 scaled */
+ /* reciprocal_value(max_P / qth_delta) */
+ struct reciprocal_value max_P_reciprocal;
+ u32 qth_delta; /* max_th - min_th */
+ u32 target_min; /* min_th + 0.4*(max_th - min_th) */
+ u32 target_max; /* min_th + 0.6*(max_th - min_th) */
u8 Scell_log;
u8 Wlog; /* log(W) */
u8 Plog; /* random number bits */
u8 Stab[RED_STAB_SIZE];
+};
+struct red_vars {
/* Variables */
int qcount; /* Number of packets since last random
number generation */
u32 qR; /* Cached random number */
- unsigned long qavg; /* Average queue length: A scaled */
- psched_time_t qidlestart; /* Start of current idle period */
+ unsigned long qavg; /* Average queue length: Wlog scaled */
+ ktime_t qidlestart; /* Start of current idle period */
};
-static inline u32 red_rmask(u8 Plog)
+static inline u32 red_maxp(u8 Plog)
{
- return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+ return Plog < 32 ? (~0U >> Plog) : ~0U;
}
-static inline void red_set_parms(struct red_parms *p,
- u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
- u8 Scell_log, u8 *stab)
+static inline void red_set_vars(struct red_vars *v)
{
/* Reset average queue length, the value is strictly bound
 * to the parameters below, resetting hurts a bit but leaving
* it might result in an unreasonable qavg for a while. --TGR
*/
- p->qavg = 0;
+ v->qavg = 0;
+
+ v->qcount = -1;
+}
+
+static inline void red_set_parms(struct red_parms *p,
+ u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
+ u8 Scell_log, u8 *stab, u32 max_P)
+{
+ int delta = qth_max - qth_min;
+ u32 max_p_delta;
- p->qcount = -1;
p->qth_min = qth_min << Wlog;
p->qth_max = qth_max << Wlog;
p->Wlog = Wlog;
p->Plog = Plog;
- p->Rmask = red_rmask(Plog);
+ if (delta < 0)
+ delta = 1;
+ p->qth_delta = delta;
+ if (!max_P) {
+ max_P = red_maxp(Plog);
+ max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+ }
+ p->max_P = max_P;
+ max_p_delta = max_P / delta;
+ max_p_delta = max(max_p_delta, 1U);
+ p->max_P_reciprocal = reciprocal_value(max_p_delta);
+
+ /* RED Adaptive target :
+ * [min_th + 0.4*(max_th - min_th),
+ * min_th + 0.6*(max_th - min_th)].
+ */
+ delta /= 5;
+ p->target_min = qth_min + 2*delta;
+ p->target_max = qth_min + 3*delta;
+
p->Scell_log = Scell_log;
p->Scell_max = (255 << Scell_log);
- memcpy(p->Stab, stab, sizeof(p->Stab));
+ if (stab)
+ memcpy(p->Stab, stab, sizeof(p->Stab));
}
-static inline int red_is_idling(struct red_parms *p)
+static inline int red_is_idling(const struct red_vars *v)
{
- return p->qidlestart != PSCHED_PASTPERFECT;
+ return v->qidlestart.tv64 != 0;
}
-static inline void red_start_of_idle_period(struct red_parms *p)
+static inline void red_start_of_idle_period(struct red_vars *v)
{
- p->qidlestart = psched_get_time();
+ v->qidlestart = ktime_get();
}
-static inline void red_end_of_idle_period(struct red_parms *p)
+static inline void red_end_of_idle_period(struct red_vars *v)
{
- p->qidlestart = PSCHED_PASTPERFECT;
+ v->qidlestart.tv64 = 0;
}
-static inline void red_restart(struct red_parms *p)
+static inline void red_restart(struct red_vars *v)
{
- red_end_of_idle_period(p);
- p->qavg = 0;
- p->qcount = -1;
+ red_end_of_idle_period(v);
+ v->qavg = 0;
+ v->qcount = -1;
}
-static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
+ const struct red_vars *v)
{
- psched_time_t now;
- long us_idle;
+ s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
int shift;
- now = psched_get_time();
- us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
/*
 * The problem: ideally, average queue length recalculation should
* be done over constant clock intervals. This is too expensive, so
@@ -190,7 +246,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
*
* dummy packets as a burst after idle time, i.e.
*
- * p->qavg *= (1-W)^m
+ * v->qavg *= (1-W)^m
*
* This is an apparently overcomplicated solution (f.e. we have to
* precompute a table to make this calculation in reasonable time)
@@ -201,7 +257,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
if (shift)
- return p->qavg >> shift;
+ return v->qavg >> shift;
else {
/* Approximate initial part of exponent with linear function:
*
@@ -210,20 +266,21 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
* Seems, it is the best solution to
* problem of too coarse exponent tabulation.
*/
- us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log;
+ us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
- if (us_idle < (p->qavg >> 1))
- return p->qavg - us_idle;
+ if (us_idle < (v->qavg >> 1))
+ return v->qavg - us_idle;
else
- return p->qavg >> 1;
+ return v->qavg >> 1;
}
}
-static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
+ const struct red_vars *v,
unsigned int backlog)
{
/*
- * NOTE: p->qavg is fixed point number with point at Wlog.
+ * NOTE: v->qavg is fixed point number with point at Wlog.
 * The formula below is equivalent to the floating point
* version:
*
@@ -231,42 +288,46 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
*
* --ANK (980924)
*/
- return p->qavg + (backlog - (p->qavg >> p->Wlog));
+ return v->qavg + (backlog - (v->qavg >> p->Wlog));
}
-static inline unsigned long red_calc_qavg(struct red_parms *p,
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
+ const struct red_vars *v,
unsigned int backlog)
{
- if (!red_is_idling(p))
- return red_calc_qavg_no_idle_time(p, backlog);
+ if (!red_is_idling(v))
+ return red_calc_qavg_no_idle_time(p, v, backlog);
else
- return red_calc_qavg_from_idle_time(p);
+ return red_calc_qavg_from_idle_time(p, v);
}
-static inline u32 red_random(struct red_parms *p)
+
+static inline u32 red_random(const struct red_parms *p)
{
- return net_random() & p->Rmask;
+ return reciprocal_divide(prandom_u32(), p->max_P_reciprocal);
}
-static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p,
+ const struct red_vars *v,
+ unsigned long qavg)
{
/* The formula used below causes questions.
- OK. qR is random number in the interval 0..Rmask
+ OK. qR is random number in the interval
+ (0..1/max_P)*(qth_max-qth_min)
i.e. 0..(2^Plog). If we used floating point
 arithmetic, it would be: (2^Plog)*rnd_num,
 where rnd_num is less than 1.
 Taking into account that qavg has its fixed
- point at Wlog, and Plog is related to max_P by
- max_P = (qth_max-qth_min)/2^Plog; two lines
+ point at Wlog, two lines
below have the following floating point equivalent:
max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
Any questions? --ANK (980924)
*/
- return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR);
+ return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
}
enum {
@@ -275,7 +336,7 @@ enum {
RED_ABOVE_MAX_TRESH,
};
-static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg)
+static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
{
if (qavg < p->qth_min)
return RED_BELOW_MIN_THRESH;
@@ -291,27 +352,29 @@ enum {
RED_HARD_MARK,
};
-static inline int red_action(struct red_parms *p, unsigned long qavg)
+static inline int red_action(const struct red_parms *p,
+ struct red_vars *v,
+ unsigned long qavg)
{
switch (red_cmp_thresh(p, qavg)) {
case RED_BELOW_MIN_THRESH:
- p->qcount = -1;
+ v->qcount = -1;
return RED_DONT_MARK;
case RED_BETWEEN_TRESH:
- if (++p->qcount) {
- if (red_mark_probability(p, qavg)) {
- p->qcount = 0;
- p->qR = red_random(p);
+ if (++v->qcount) {
+ if (red_mark_probability(p, v, qavg)) {
+ v->qcount = 0;
+ v->qR = red_random(p);
return RED_PROB_MARK;
}
} else
- p->qR = red_random(p);
+ v->qR = red_random(p);
return RED_DONT_MARK;
case RED_ABOVE_MAX_TRESH:
- p->qcount = -1;
+ v->qcount = -1;
return RED_HARD_MARK;
}
@@ -319,4 +382,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
return RED_DONT_MARK;
}
+static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
+{
+ unsigned long qavg;
+ u32 max_p_delta;
+
+ qavg = v->qavg;
+ if (red_is_idling(v))
+ qavg = red_calc_qavg_from_idle_time(p, v);
+
+ /* v->qavg is fixed point number with point at Wlog */
+ qavg >>= p->Wlog;
+
+ if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+ p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+ else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+ p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+ max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+ max_p_delta = max(max_p_delta, 1U);
+ p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
#endif
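
A sketch of the per-enqueue flow that follows from the parms/vars split above; struct my_red_sched_data and the return convention are assumptions, and a real qdisc would also keep red_stats and run red_adaptative_algo() from a periodic timer.

#include <net/red.h>

struct my_red_sched_data {
	struct red_parms parms;
	struct red_vars  vars;
};

/* return 0 to enqueue normally, 1 to mark or early-drop the packet */
static int my_red_check(struct my_red_sched_data *q, unsigned int backlog)
{
	q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		return 0;
	case RED_PROB_MARK:
	case RED_HARD_MARK:
		return 1;
	}
	return 0;
}
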
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 356d6e3dc20..259992444e8 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -3,13 +3,22 @@
/*
* regulatory support structures
*
- * Copyright 2008-2009 Luis R. Rodriguez <lrodriguez@atheros.com>
+ * Copyright 2008-2009 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/rcupdate.h>
/**
* enum environment_cap - Environment parsed from country IE
@@ -27,22 +36,31 @@ enum environment_cap {
/**
* struct regulatory_request - used to keep track of regulatory requests
*
+ * @rcu_head: RCU head struct used to free the request
* @wiphy_idx: this is set if this request's initiator is
- * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This
- * can be used by the wireless core to deal with conflicts
- * and potentially inform users of which devices specifically
- * cased the conflicts.
+ * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This
+ * can be used by the wireless core to deal with conflicts
+ * and potentially inform users of which devices specifically
+ * caused the conflicts.
* @initiator: indicates who sent this request, could be any of
- * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
+ * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
* @alpha2: the ISO / IEC 3166 alpha2 country code of the requested
- * regulatory domain. We have a few special codes:
- * 00 - World regulatory domain
- * 99 - built by driver but a specific alpha2 cannot be determined
- * 98 - result of an intersection between two regulatory domains
+ * regulatory domain. We have a few special codes:
+ * 00 - World regulatory domain
+ * 99 - built by driver but a specific alpha2 cannot be determined
+ * 98 - result of an intersection between two regulatory domains
* 97 - regulatory domain has not yet been configured
+ * @dfs_region: If CRDA responded with a regulatory domain that requires
+ * DFS master operation on a known DFS region (NL80211_DFS_*),
+ * dfs_region represents that region. Drivers can use this and the
+ * @alpha2 to adjust their device's DFS parameters as required.
+ * @user_reg_hint_type: if the @initiator was of type
+ * %NL80211_REGDOM_SET_BY_USER, this classifies the type
+ * of hint passed. This could be any of the %NL80211_USER_REG_HINT_*
+ * types.
* @intersect: indicates whether the wireless core should intersect
- * the requested regulatory domain with the presently set regulatory
- * domain.
+ * the requested regulatory domain with the presently set regulatory
+ * domain.
* @processed: indicates whether or not this requests has already been
* processed. When the last request is processed it means that the
* currently regulatory domain set on cfg80211 is updated from
@@ -50,21 +68,84 @@ enum environment_cap {
* the last request is not yet processed we must yield until it
* is processed before processing any new requests.
* @country_ie_checksum: checksum of the last processed and accepted
- * country IE
+ * country IE
* @country_ie_env: lets us know if the AP is telling us we are outdoor,
- * indoor, or if it doesn't matter
+ * indoor, or if it doesn't matter
* @list: used to insert into the reg_requests_list linked list
*/
struct regulatory_request {
+ struct rcu_head rcu_head;
int wiphy_idx;
enum nl80211_reg_initiator initiator;
+ enum nl80211_user_reg_hint_type user_reg_hint_type;
char alpha2[2];
+ enum nl80211_dfs_regions dfs_region;
bool intersect;
bool processed;
enum environment_cap country_ie_env;
struct list_head list;
};
+/**
+ * enum ieee80211_regulatory_flags - device regulatory flags
+ *
+ * @REGULATORY_CUSTOM_REG: tells us the driver for this device
+ * has its own custom regulatory domain and cannot identify the
+ * ISO / IEC 3166 alpha2 it belongs to. When this is enabled
+ * we will disregard the first regulatory hint (when the
+ * initiator is %REGDOM_SET_BY_CORE). Drivers that use
+ * wiphy_apply_custom_regulatory() should have this flag set
+ * or the regulatory core will set it for the wiphy.
+ * If you use regulatory_hint() *after* using
+ * wiphy_apply_custom_regulatory() the wireless core will
+ * clear the REGULATORY_CUSTOM_REG for your wiphy as it would be
+ * implied that the device somehow gained knowledge of its region.
+ * @REGULATORY_STRICT_REG: tells us that the wiphy for this device
+ * has regulatory domain that it wishes to be considered as the
+ * superset for regulatory rules. After this device gets its regulatory
+ * domain programmed further regulatory hints shall only be considered
+ * for this device to enhance regulatory compliance, forcing the
+ * device to only possibly use subsets of the original regulatory
+ * rules. For example if channel 13 and 14 are disabled by this
+ * device's regulatory domain no user specified regulatory hint which
+ * has these channels enabled would enable them for this wiphy,
+ * the device's original regulatory domain will be trusted as the
+ * base. You can program the superset of regulatory rules for this
+ * wiphy with regulatory_hint() for cards programmed with an
+ * ISO3166-alpha2 country code. wiphys that use regulatory_hint()
+ * will have their wiphy->regd programmed once the regulatory
+ * domain is set, and all other regulatory hints will be ignored
+ * until their own regulatory domain gets programmed.
+ * @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to
+ * ensure that passive scan flags and beaconing flags may not be lifted by
+ * cfg80211 due to regulatory beacon hints. For more information on beacon
+ * hints read the documentation for regulatory_hint_found_beacon()
+ * @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference
+ * that even though they may have programmed their own custom power
+ * setting prior to wiphy registration, they want to ensure their channel
+ * power settings are updated for this connection with the power settings
+ * derived from the regulatory domain. The regulatory domain used will be
+ * based on the ISO3166-alpha2 from country IE provided through
+ * regulatory_hint_country_ie()
+ * @REGULATORY_COUNTRY_IE_IGNORE: for devices that have a preference to ignore
+ * all country IE information processed by the regulatory core. This will
+ * override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will
+ * be ignored.
+ * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the
+ * NO_IR relaxation, which enables transmissions on channels on which
+ * otherwise initiating radiation is not allowed. This will enable the
+ * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
+ * option
+ */
+enum ieee80211_regulatory_flags {
+ REGULATORY_CUSTOM_REG = BIT(0),
+ REGULATORY_STRICT_REG = BIT(1),
+ REGULATORY_DISABLE_BEACON_HINTS = BIT(2),
+ REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
+ REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
+ REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
+};
+
struct ieee80211_freq_range {
u32 start_freq_khz;
u32 end_freq_khz;
@@ -80,11 +161,14 @@ struct ieee80211_reg_rule {
struct ieee80211_freq_range freq_range;
struct ieee80211_power_rule power_rule;
u32 flags;
+ u32 dfs_cac_ms;
};
struct ieee80211_regdomain {
+ struct rcu_head rcu_head;
u32 n_reg_rules;
char alpha2[2];
+ enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
};
@@ -95,14 +179,18 @@ struct ieee80211_regdomain {
#define DBM_TO_MBM(gain) ((gain) * 100)
#define MBM_TO_DBM(gain) ((gain) / 100)
-#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \
-{ \
- .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \
- .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \
- .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \
- .power_rule.max_antenna_gain = DBI_TO_MBI(gain),\
- .power_rule.max_eirp = DBM_TO_MBM(eirp), \
- .flags = reg_flags, \
+#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \
+{ \
+ .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \
+ .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \
+ .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \
+ .power_rule.max_antenna_gain = DBI_TO_MBI(gain), \
+ .power_rule.max_eirp = DBM_TO_MBM(eirp), \
+ .flags = reg_flags, \
+ .dfs_cac_ms = dfs_cac, \
}
+#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \
+ REG_RULE_EXT(start, end, bw, gain, eirp, 0, reg_flags)
+
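/* A hedged example regulatory domain built with these helpers; the channel
 * ranges, power limits, and the 60000 ms CAC time are illustrative values
 * only.
 */
static const struct ieee80211_regdomain example_regdom = {
	.n_reg_rules = 2,
	.alpha2 = "99",		/* conventionally used for a built-in/custom domain */
	.reg_rules = {
		/* 2.4 GHz: up to 40 MHz wide, 20 dBm EIRP, no extra flags */
		REG_RULE(2402, 2482, 40, 0, 20, 0),
		/* 5 GHz DFS range: 80 MHz, 23 dBm, 60 s CAC, radar detection required */
		REG_RULE_EXT(5250, 5330, 80, 0, 23, 60000, NL80211_RRF_DFS),
	},
};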
#endif
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 99e6e19b57c..7f830ff67f0 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -27,19 +27,13 @@ struct sk_buff;
struct dst_entry;
struct proto;
-/* empty to "strongly type" an otherwise void parameter.
- */
-struct request_values {
-};
-
struct request_sock_ops {
int family;
int obj_size;
struct kmem_cache *slab;
char *slab_name;
int (*rtx_syn_ack)(struct sock *sk,
- struct request_sock *req,
- struct request_values *rvp);
+ struct request_sock *req);
void (*send_ack)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
void (*send_reset)(struct sock *sk,
@@ -49,13 +43,17 @@ struct request_sock_ops {
struct request_sock *req);
};
+int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+
/* struct request_sock - mini sock to represent a connection request
*/
struct request_sock {
- struct request_sock *dl_next; /* Must be first member! */
+ struct sock_common __req_common;
+ struct request_sock *dl_next;
u16 mss;
- u8 retrans;
- u8 cookie_ts; /* syncookie: encode tcpopts in timestamp */
+ u8 num_retrans; /* number of retransmits */
+ u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
+ u8 num_timeout:7; /* number of timeouts */
/* The following two fields can be easily recomputed I think -AK */
u32 window_clamp; /* window clamp at creation time */
u32 rcv_wnd; /* rcv_wnd offered first time */
@@ -96,7 +94,8 @@ extern int sysctl_max_syn_backlog;
*/
struct listen_sock {
u8 max_qlen_log;
- /* 3 bytes hole, try to use */
+ u8 synflood_warned;
+ /* 2 bytes hole, try to use */
int qlen;
int qlen_young;
int clock_hand;
@@ -105,6 +104,34 @@ struct listen_sock {
struct request_sock *syn_table[0];
};
+/*
+ * For a TCP Fast Open listener -
+ * lock - protects the access to all the reqsk, which is co-owned by
+ * the listener and the child socket.
+ * qlen - pending TFO requests (still in TCP_SYN_RECV).
+ * max_qlen - max TFO reqs allowed before TFO is disabled.
+ *
+ * XXX (TFO) - ideally these fields could be made part of the "listen_sock"
+ * structure above. But there is some implementation difficulty:
+ * listen_sock is part of request_sock_queue and hence will be freed
+ * when a listener is stopped, while TFO-related fields may continue to
+ * be accessed even after a listener is closed, until its sk_refcnt
+ * drops to 0 (implying no more outstanding TFO reqs). One solution is
+ * to keep listen_opt around until sk_refcnt drops to 0, but there is
+ * some other complexity that needs to be resolved; e.g., a listener can
+ * be disabled temporarily through shutdown()->tcp_disconnect(), and
+ * re-enabled later.
+ */
+struct fastopen_queue {
+ struct request_sock *rskq_rst_head; /* Keep track of past TFO */
+ struct request_sock *rskq_rst_tail; /* requests that caused RST.
+ * This is part of the defense
+ * against spoofing attacks.
+ */
+ spinlock_t lock;
+ int qlen; /* # of pending (TCP_SYN_RECV) reqs */
+ int max_qlen; /* != 0 iff TFO is currently enabled */
+};
+
/** struct request_sock_queue - queue of request_socks
*
* @rskq_accept_head - FIFO head of established children
@@ -128,13 +155,21 @@ struct request_sock_queue {
u8 rskq_defer_accept;
/* 3 bytes hole, try to pack */
struct listen_sock *listen_opt;
+ struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
+ * enabled on this listener. Check
+ * max_qlen != 0 in fastopen_queue
+ * to determine if TFO is enabled
+ * right at this moment.
+ */
};
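/* Hedged helper sketch (not in this patch): following the comment above,
 * TFO is active on a listener only while fastopenq is non-NULL and its
 * max_qlen is non-zero.
 */
static inline bool example_tfo_active(const struct request_sock_queue *queue)
{
	return queue->fastopenq != NULL && queue->fastopenq->max_qlen != 0;
}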
-extern int reqsk_queue_alloc(struct request_sock_queue *queue,
- unsigned int nr_table_entries);
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+ unsigned int nr_table_entries);
-extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+void __reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+ bool reset);
static inline struct request_sock *
reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
@@ -189,25 +224,12 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
return req;
}
-static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
- struct sock *parent)
-{
- struct request_sock *req = reqsk_queue_remove(queue);
- struct sock *child = req->sk;
-
- WARN_ON(child == NULL);
-
- sk_acceptq_removed(parent);
- __reqsk_free(req);
- return child;
-}
-
static inline int reqsk_queue_removed(struct request_sock_queue *queue,
struct request_sock *req)
{
struct listen_sock *lopt = queue->listen_opt;
- if (req->retrans == 0)
+ if (req->num_timeout == 0)
--lopt->qlen_young;
return --lopt->qlen;
@@ -245,7 +267,8 @@ static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
struct listen_sock *lopt = queue->listen_opt;
req->expires = jiffies + timeout;
- req->retrans = 0;
+ req->num_retrans = 0;
+ req->num_timeout = 0;
req->sk = NULL;
req->dl_next = lopt->syn_table[hash];
diff --git a/include/net/rose.h b/include/net/rose.h
index 5ba9f02731e..50811fe2c58 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -14,6 +14,12 @@
#define ROSE_MIN_LEN 3
+#define ROSE_CALL_REQ_ADDR_LEN_OFF 3
+#define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */
+#define ROSE_CALL_REQ_DEST_ADDR_OFF 4
+#define ROSE_CALL_REQ_SRC_ADDR_OFF 9
+#define ROSE_CALL_REQ_FACILITIES_OFF 14
+
#define ROSE_GFI 0x10
#define ROSE_Q_BIT 0x80
#define ROSE_D_BIT 0x40
@@ -154,38 +160,42 @@ extern int sysctl_rose_routing_control;
extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-extern int rosecmp(rose_address *, rose_address *);
-extern int rosecmpm(rose_address *, rose_address *, unsigned short);
-extern char *rose2asc(char *buf, const rose_address *);
-extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
-extern void rose_kill_by_neigh(struct rose_neigh *);
-extern unsigned int rose_new_lci(struct rose_neigh *);
-extern int rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
-extern void rose_destroy_socket(struct sock *);
+
+int rosecmp(rose_address *, rose_address *);
+int rosecmpm(rose_address *, rose_address *, unsigned short);
+char *rose2asc(char *buf, const rose_address *);
+struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+void rose_kill_by_neigh(struct rose_neigh *);
+unsigned int rose_new_lci(struct rose_neigh *);
+int rose_rx_call_request(struct sk_buff *, struct net_device *,
+ struct rose_neigh *, unsigned int);
+void rose_destroy_socket(struct sock *);
/* rose_dev.c */
-extern void rose_setup(struct net_device *);
+void rose_setup(struct net_device *);
/* rose_in.c */
-extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
+int rose_process_rx_frame(struct sock *, struct sk_buff *);
/* rose_link.c */
-extern void rose_start_ftimer(struct rose_neigh *);
-extern void rose_stop_ftimer(struct rose_neigh *);
-extern void rose_stop_t0timer(struct rose_neigh *);
-extern int rose_ftimer_running(struct rose_neigh *);
-extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
-extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
-extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+void rose_start_ftimer(struct rose_neigh *);
+void rose_stop_ftimer(struct rose_neigh *);
+void rose_stop_t0timer(struct rose_neigh *);
+int rose_ftimer_running(struct rose_neigh *);
+void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *,
+ unsigned short);
+void rose_transmit_clear_request(struct rose_neigh *, unsigned int,
+ unsigned char, unsigned char);
+void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
/* rose_loopback.c */
-extern void rose_loopback_init(void);
-extern void rose_loopback_clear(void);
-extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+void rose_loopback_init(void);
+void rose_loopback_clear(void);
+int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
/* rose_out.c */
-extern void rose_kick(struct sock *);
-extern void rose_enquiry_response(struct sock *);
+void rose_kick(struct sock *);
+void rose_enquiry_response(struct sock *);
/* rose_route.c */
extern struct rose_neigh *rose_loopback_neigh;
@@ -193,43 +203,45 @@ extern const struct file_operations rose_neigh_fops;
extern const struct file_operations rose_nodes_fops;
extern const struct file_operations rose_routes_fops;
-extern void rose_add_loopback_neigh(void);
-extern int __must_check rose_add_loopback_node(rose_address *);
-extern void rose_del_loopback_node(rose_address *);
-extern void rose_rt_device_down(struct net_device *);
-extern void rose_link_device_down(struct net_device *);
-extern struct net_device *rose_dev_first(void);
-extern struct net_device *rose_dev_get(rose_address *);
-extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
-extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
-extern int rose_rt_ioctl(unsigned int, void __user *);
-extern void rose_link_failed(ax25_cb *, int);
-extern int rose_route_frame(struct sk_buff *, ax25_cb *);
-extern void rose_rt_free(void);
+void rose_add_loopback_neigh(void);
+int __must_check rose_add_loopback_node(rose_address *);
+void rose_del_loopback_node(rose_address *);
+void rose_rt_device_down(struct net_device *);
+void rose_link_device_down(struct net_device *);
+struct net_device *rose_dev_first(void);
+struct net_device *rose_dev_get(rose_address *);
+struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *,
+ unsigned char *, int);
+int rose_rt_ioctl(unsigned int, void __user *);
+void rose_link_failed(ax25_cb *, int);
+int rose_route_frame(struct sk_buff *, ax25_cb *);
+void rose_rt_free(void);
/* rose_subr.c */
-extern void rose_clear_queues(struct sock *);
-extern void rose_frames_acked(struct sock *, unsigned short);
-extern void rose_requeue_frames(struct sock *);
-extern int rose_validate_nr(struct sock *, unsigned short);
-extern void rose_write_internal(struct sock *, int);
-extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
-extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
-extern void rose_disconnect(struct sock *, int, int, int);
+void rose_clear_queues(struct sock *);
+void rose_frames_acked(struct sock *, unsigned short);
+void rose_requeue_frames(struct sock *);
+int rose_validate_nr(struct sock *, unsigned short);
+void rose_write_internal(struct sock *, int);
+int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+int rose_parse_facilities(unsigned char *, unsigned int,
+ struct rose_facilities_struct *);
+void rose_disconnect(struct sock *, int, int, int);
/* rose_timer.c */
-extern void rose_start_heartbeat(struct sock *);
-extern void rose_start_t1timer(struct sock *);
-extern void rose_start_t2timer(struct sock *);
-extern void rose_start_t3timer(struct sock *);
-extern void rose_start_hbtimer(struct sock *);
-extern void rose_start_idletimer(struct sock *);
-extern void rose_stop_heartbeat(struct sock *);
-extern void rose_stop_timer(struct sock *);
-extern void rose_stop_idletimer(struct sock *);
+void rose_start_heartbeat(struct sock *);
+void rose_start_t1timer(struct sock *);
+void rose_start_t2timer(struct sock *);
+void rose_start_t3timer(struct sock *);
+void rose_start_hbtimer(struct sock *);
+void rose_start_idletimer(struct sock *);
+void rose_stop_heartbeat(struct sock *);
+void rose_stop_timer(struct sock *);
+void rose_stop_idletimer(struct sock *);
/* sysctl_net_rose.c */
-extern void rose_register_sysctl(void);
-extern void rose_unregister_sysctl(void);
+void rose_register_sysctl(void);
+void rose_unregister_sysctl(void);
#endif
diff --git a/include/net/route.h b/include/net/route.h
index b8c1f7703fc..b17cf28f996 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -30,55 +30,57 @@
#include <net/inet_sock.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
+#include <linux/rcupdate.h>
#include <linux/route.h>
#include <linux/ip.h>
#include <linux/cache.h>
#include <linux/security.h>
-#ifndef __KERNEL__
-#warning This file is not supposed to be used outside of kernel.
-#endif
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
#define RTO_ONLINK 0x01
-#define RTO_CONN 0
-/* RTO_CONN is not used (being alias for 0), but preserved not to break
- * some modules referring to it. */
-
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
struct fib_nh;
-struct inet_peer;
+struct fib_info;
struct rtable {
struct dst_entry dst;
- /* Cache lookup keys */
- struct flowi fl;
-
int rt_genid;
- unsigned rt_flags;
+ unsigned int rt_flags;
__u16 rt_type;
+ __u8 rt_is_input;
+ __u8 rt_uses_gateway;
- __be32 rt_dst; /* Path destination */
- __be32 rt_src; /* Path source */
int rt_iif;
/* Info on neighbour */
__be32 rt_gateway;
/* Miscellaneous cached information */
- __be32 rt_spec_dst; /* RFC1122 specific destination */
- struct inet_peer *peer; /* long-living peer info */
+ u32 rt_pmtu;
+
+ struct list_head rt_uncached;
};
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
+{
+ return rt->rt_is_input != 0;
+}
+
+static inline bool rt_is_output_route(const struct rtable *rt)
{
- return rt->fl.iif != 0;
+ return rt->rt_is_input == 0;
}
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
{
- return rt->fl.iif == 0;
+ if (rt->rt_gateway)
+ return rt->rt_gateway;
+ return daddr;
}
struct ip_rt_acct {
@@ -89,68 +91,118 @@ struct ip_rt_acct {
};
struct rt_cache_stat {
- unsigned int in_hit;
unsigned int in_slow_tot;
unsigned int in_slow_mc;
unsigned int in_no_route;
unsigned int in_brd;
unsigned int in_martian_dst;
unsigned int in_martian_src;
- unsigned int out_hit;
unsigned int out_slow_tot;
unsigned int out_slow_mc;
- unsigned int gc_total;
- unsigned int gc_ignored;
- unsigned int gc_goal_miss;
- unsigned int gc_dst_overflow;
- unsigned int in_hlist_search;
- unsigned int out_hlist_search;
};
extern struct ip_rt_acct __percpu *ip_rt_acct;
struct in_device;
-extern int ip_rt_init(void);
-extern void ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw,
- __be32 src, struct net_device *dev);
-extern void rt_cache_flush(struct net *net, int how);
-extern void rt_cache_flush_batch(void);
-extern int __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp);
-extern int ip_route_output_key(struct net *, struct rtable **, struct flowi *flp);
-extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
-
-extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin, bool noref);
-static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
+int ip_rt_init(void);
+void rt_cache_flush(struct net *net);
+void rt_flush_dev(struct net_device *dev);
+struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+ struct sock *sk);
+struct dst_entry *ipv4_blackhole_route(struct net *net,
+ struct dst_entry *dst_orig);
+
+static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
+{
+ return ip_route_output_flow(net, flp, NULL);
+}
+
+static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
+ __be32 saddr, u8 tos, int oif)
+{
+ struct flowi4 fl4 = {
+ .flowi4_oif = oif,
+ .flowi4_tos = tos,
+ .daddr = daddr,
+ .saddr = saddr,
+ };
+ return ip_route_output_key(net, &fl4);
+}
+
+static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4,
+ struct sock *sk,
+ __be32 daddr, __be32 saddr,
+ __be16 dport, __be16 sport,
+ __u8 proto, __u8 tos, int oif)
{
- return ip_route_input_common(skb, dst, src, tos, devin, false);
+ flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
+ RT_SCOPE_UNIVERSE, proto,
+ sk ? inet_sk_flowi_flags(sk) : 0,
+ daddr, saddr, dport, sport);
+ if (sk)
+ security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ return ip_route_output_flow(net, fl4, sk);
}
-static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
+static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4,
+ __be32 daddr, __be32 saddr,
+ __be32 gre_key, __u8 tos, int oif)
{
- return ip_route_input_common(skb, dst, src, tos, devin, true);
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = oif;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = tos;
+ fl4->flowi4_proto = IPPROTO_GRE;
+ fl4->fl4_gre_key = gre_key;
+ return ip_route_output_key(net, fl4);
}
-extern unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, unsigned short new_mtu, struct net_device *dev);
-extern void ip_rt_send_redirect(struct sk_buff *skb);
+int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin);
-extern unsigned inet_addr_type(struct net *net, __be32 addr);
-extern unsigned inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
-extern void ip_rt_multicast_event(struct in_device *);
-extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
-extern void ip_rt_get_source(u8 *src, struct rtable *rt);
-extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
+static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin)
+{
+ int err;
+
+ rcu_read_lock();
+ err = ip_route_input_noref(skb, dst, src, tos, devin);
+ if (!err)
+ skb_dst_force(skb);
+ rcu_read_unlock();
+
+ return err;
+}
+
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
+ u32 mark, u8 protocol, int flow_flags);
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+ u8 protocol, int flow_flags);
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip_rt_send_redirect(struct sk_buff *skb);
+
+unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
+ __be32 addr);
+void ip_rt_multicast_event(struct in_device *);
+int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct in_ifaddr;
-extern void fib_add_ifaddr(struct in_ifaddr *);
+void fib_add_ifaddr(struct in_ifaddr *);
+void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
-static inline void ip_rt_put(struct rtable * rt)
+static inline void ip_rt_put(struct rtable *rt)
{
- if (rt)
- dst_release(&rt->dst);
+ /* dst_release() accepts a NULL parameter.
+ * We rely on dst being the first member of struct rtable.
+ */
+ BUILD_BUG_ON(offsetof(struct rtable, dst) != 0);
+ dst_release(&rt->dst);
}
#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
@@ -162,73 +214,103 @@ static inline char rt_tos2priority(u8 tos)
return ip_tos2prio[IPTOS_TOS(tos)>>1];
}
-static inline int ip_route_connect(struct rtable **rp, __be32 dst,
- __be32 src, u32 tos, int oif, u8 protocol,
- __be16 sport, __be16 dport, struct sock *sk,
- int flags)
+/* ip_route_connect() and ip_route_newports() work in tandem whilst
+ * binding a socket for a new outgoing connection.
+ *
+ * In order to use IPSEC properly, we must, in the end, have a
+ * route that was looked up using all available keys including source
+ * and destination ports.
+ *
+ * However, if a source port needs to be allocated (the user specified
+ * a wildcard source port) we need to obtain addressing information
+ * in order to perform that allocation.
+ *
+ * So ip_route_connect() looks up a route using wildcarded source and
+ * destination ports in the key, simply so that we can get a pair of
+ * addresses to use for port allocation.
+ *
+ * Later, once the ports are allocated, ip_route_newports() will make
+ * another route lookup if needed to make sure we catch any IPSEC
+ * rules keyed on the port information.
+ *
+ * The callers allocate the flow key on their stack, and must pass in
+ * the same flowi4 object to both the ip_route_connect() and the
+ * ip_route_newports() calls.
+ */
+
+static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
+ u32 tos, int oif, u8 protocol,
+ __be16 sport, __be16 dport,
+ struct sock *sk)
{
- struct flowi fl = { .oif = oif,
- .mark = sk->sk_mark,
- .fl4_dst = dst,
- .fl4_src = src,
- .fl4_tos = tos,
- .proto = protocol,
- .fl_ip_sport = sport,
- .fl_ip_dport = dport };
- int err;
- struct net *net = sock_net(sk);
+ __u8 flow_flags = 0;
if (inet_sk(sk)->transparent)
- fl.flags |= FLOWI_FLAG_ANYSRC;
+ flow_flags |= FLOWI_FLAG_ANYSRC;
+
+ flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
+ protocol, flow_flags, dst, src, dport, sport);
+}
+
+static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
+ __be32 dst, __be32 src, u32 tos,
+ int oif, u8 protocol,
+ __be16 sport, __be16 dport,
+ struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct rtable *rt;
+
+ ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
+ sport, dport, sk);
if (!dst || !src) {
- err = __ip_route_output_key(net, rp, &fl);
- if (err)
- return err;
- fl.fl4_dst = (*rp)->rt_dst;
- fl.fl4_src = (*rp)->rt_src;
- ip_rt_put(*rp);
- *rp = NULL;
+ rt = __ip_route_output_key(net, fl4);
+ if (IS_ERR(rt))
+ return rt;
+ ip_rt_put(rt);
+ flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
}
- security_sk_classify_flow(sk, &fl);
- return ip_route_output_flow(net, rp, &fl, sk, flags);
+ security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ return ip_route_output_flow(net, fl4, sk);
}
-static inline int ip_route_newports(struct rtable **rp, u8 protocol,
- __be16 sport, __be16 dport, struct sock *sk)
+static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
+ __be16 orig_sport, __be16 orig_dport,
+ __be16 sport, __be16 dport,
+ struct sock *sk)
{
- if (sport != (*rp)->fl.fl_ip_sport ||
- dport != (*rp)->fl.fl_ip_dport) {
- struct flowi fl;
-
- memcpy(&fl, &(*rp)->fl, sizeof(fl));
- fl.fl_ip_sport = sport;
- fl.fl_ip_dport = dport;
- fl.proto = protocol;
- if (inet_sk(sk)->transparent)
- fl.flags |= FLOWI_FLAG_ANYSRC;
- ip_rt_put(*rp);
- *rp = NULL;
- security_sk_classify_flow(sk, &fl);
- return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0);
+ if (sport != orig_sport || dport != orig_dport) {
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
+ ip_rt_put(rt);
+ flowi4_update_output(fl4, sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(sk), fl4->daddr,
+ fl4->saddr);
+ security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ return ip_route_output_flow(sock_net(sk), fl4, sk);
}
- return 0;
+ return rt;
}
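/* Hedged sketch of the tandem described above, loosely modelled on an IPv4
 * connect() path; example_connect(), the inet_sport accessor, and the
 * port-allocation step are illustrative assumptions, not part of this patch.
 */
static int example_connect(struct sock *sk, __be32 daddr, __be16 dport)
{
	__be16 orig_sport = inet_sk(sk)->inet_sport;	/* may be 0 (wildcard) */
	struct flowi4 fl4;
	struct rtable *rt;

	/* First lookup with possibly wildcarded ports, just to learn the
	 * addresses needed for port allocation.
	 */
	rt = ip_route_connect(&fl4, daddr, 0, RT_CONN_FLAGS(sk),
			      sk->sk_bound_dev_if, IPPROTO_TCP,
			      orig_sport, dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* ... allocate a local source port here if orig_sport was 0 ... */

	/* Second lookup with the final ports, so any IPSEC rules keyed on
	 * the port information are honoured.
	 */
	rt = ip_route_newports(&fl4, rt, orig_sport, dport,
			       inet_sk(sk)->inet_sport, dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* a real caller would attach the route to the socket; drop it here */
	ip_rt_put(rt);
	return 0;
}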
-extern void rt_bind_peer(struct rtable *rt, int create);
-
-static inline struct inet_peer *rt_get_peer(struct rtable *rt)
+static inline int inet_iif(const struct sk_buff *skb)
{
- if (rt->peer)
- return rt->peer;
+ int iif = skb_rtable(skb)->rt_iif;
- rt_bind_peer(rt, 0);
- return rt->peer;
+ if (iif)
+ return iif;
+ return skb->skb_iif;
}
-static inline int inet_iif(const struct sk_buff *skb)
+extern int sysctl_ip_default_ttl;
+
+static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
- return skb_rtable(skb)->rt_iif;
+ int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+
+ if (hoplimit == 0)
+ hoplimit = sysctl_ip_default_ttl;
+ return hoplimit;
}
#endif /* _ROUTE_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 4093ca78cf6..72240e5ac2c 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -4,15 +4,16 @@
#include <linux/rtnetlink.h>
#include <net/netlink.h>
-typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *);
+typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *);
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
-extern int __rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func);
-extern void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func);
-extern int rtnl_unregister(int protocol, int msgtype);
-extern void rtnl_unregister_all(int protocol);
+int __rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+void rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+int rtnl_unregister(int protocol, int msgtype);
+void rtnl_unregister_all(int protocol);
static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
{
@@ -38,9 +39,13 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* @get_size: Function to calculate required room for dumping device
* specific netlink attributes
* @fill_info: Function to dump device specific netlink attributes
- * @get_xstats_size: Function to calculate required room for dumping devic
+ * @get_xstats_size: Function to calculate required room for dumping device
* specific statistics
* @fill_xstats: Function to dump device specific statistics
+ * @get_num_tx_queues: Function to determine number of transmit queues
+ * to create when creating a new device.
+ * @get_num_rx_queues: Function to determine number of receive queues
+ * to create when creating a new device.
*/
struct rtnl_link_ops {
struct list_head list;
@@ -72,16 +77,29 @@ struct rtnl_link_ops {
size_t (*get_xstats_size)(const struct net_device *dev);
int (*fill_xstats)(struct sk_buff *skb,
const struct net_device *dev);
- int (*get_tx_queues)(struct net *net, struct nlattr *tb[],
- unsigned int *tx_queues,
- unsigned int *real_tx_queues);
+ unsigned int (*get_num_tx_queues)(void);
+ unsigned int (*get_num_rx_queues)(void);
+
+ int slave_maxtype;
+ const struct nla_policy *slave_policy;
+ int (*slave_validate)(struct nlattr *tb[],
+ struct nlattr *data[]);
+ int (*slave_changelink)(struct net_device *dev,
+ struct net_device *slave_dev,
+ struct nlattr *tb[],
+ struct nlattr *data[]);
+ size_t (*get_slave_size)(const struct net_device *dev,
+ const struct net_device *slave_dev);
+ int (*fill_slave_info)(struct sk_buff *skb,
+ const struct net_device *dev,
+ const struct net_device *slave_dev);
};
-extern int __rtnl_link_register(struct rtnl_link_ops *ops);
-extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
+int __rtnl_link_register(struct rtnl_link_ops *ops);
+void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-extern int rtnl_link_register(struct rtnl_link_ops *ops);
-extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
+int rtnl_link_register(struct rtnl_link_ops *ops);
+void rtnl_link_unregister(struct rtnl_link_ops *ops);
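/* Hedged sketch (names invented): a virtual-device driver advertising fixed
 * queue counts through the new get_num_{tx,rx}_queues() hooks; the .kind
 * member and the remaining rtnl_link_ops fields are assumed from the full
 * structure definition.
 */
static unsigned int example_get_num_queues(void)
{
	return 4;
}

static struct rtnl_link_ops example_link_ops = {
	.kind			= "example",
	.get_num_tx_queues	= example_get_num_queues,
	.get_num_rx_queues	= example_get_num_queues,
};

/* in module init:	err = rtnl_link_register(&example_link_ops);	*/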
/**
* struct rtnl_af_ops - rtnetlink address family operations
@@ -91,7 +109,7 @@ extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
* @fill_link_af: Function to fill IFLA_AF_SPEC with address family
* specific netlink attributes.
* @get_link_af_size: Function to calculate size of address family specific
- * netlink attributes exlusive the container attribute.
+ * netlink attributes.
* @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr
* for invalid configuration settings.
* @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
@@ -111,19 +129,18 @@ struct rtnl_af_ops {
const struct nlattr *attr);
};
-extern int __rtnl_af_register(struct rtnl_af_ops *ops);
-extern void __rtnl_af_unregister(struct rtnl_af_ops *ops);
+void __rtnl_af_unregister(struct rtnl_af_ops *ops);
-extern int rtnl_af_register(struct rtnl_af_ops *ops);
-extern void rtnl_af_unregister(struct rtnl_af_ops *ops);
+void rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_unregister(struct rtnl_af_ops *ops);
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+ const struct rtnl_link_ops *ops,
+ struct nlattr *tb[]);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
- char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
-extern int rtnl_configure_link(struct net_device *dev,
- const struct ifinfomsg *ifm);
-extern const struct nla_policy ifla_policy[IFLA_MAX+1];
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ea1f8a83160..624f9857c83 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -4,7 +4,6 @@
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
-#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
@@ -25,16 +24,18 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_THROTTLED,
};
/*
* following bits are only changed while qdisc lock is held
*/
enum qdisc___state_t {
- __QDISC___STATE_RUNNING,
+ __QDISC___STATE_RUNNING = 1,
};
struct qdisc_size_table {
+ struct rcu_head rcu;
struct list_head list;
struct tc_sizespec szopts;
int refcnt;
@@ -44,21 +45,25 @@ struct qdisc_size_table {
struct Qdisc {
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
struct sk_buff * (*dequeue)(struct Qdisc *dev);
- unsigned flags;
+ unsigned int flags;
#define TCQ_F_BUILTIN 1
-#define TCQ_F_THROTTLED 2
-#define TCQ_F_INGRESS 4
-#define TCQ_F_CAN_BYPASS 8
-#define TCQ_F_MQROOT 16
+#define TCQ_F_INGRESS 2
+#define TCQ_F_CAN_BYPASS 4
+#define TCQ_F_MQROOT 8
+#define TCQ_F_ONETXQUEUE 0x10 /* dequeue_skb() can assume all skbs are for
+ * q->dev_queue : It can test
+ * netif_xmit_frozen_or_stopped() before
+ * dequeuing the next packet.
+ * This is true for MQ/MQPRIO slaves and
+ * for non-multiqueue devices.
+ */
#define TCQ_F_WARN_NONWC (1 << 16)
- int padded;
- struct Qdisc_ops *ops;
- struct qdisc_size_table *stab;
+ u32 limit;
+ const struct Qdisc_ops *ops;
+ struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
u32 parent;
- atomic_t refcnt;
- struct gnet_stats_rate_est rate_est;
int (*reshape_fail)(struct sk_buff *skb,
struct Qdisc *q);
@@ -69,8 +74,9 @@ struct Qdisc {
*/
struct Qdisc *__parent;
struct netdev_queue *dev_queue;
- struct Qdisc *next_sched;
+ struct gnet_stats_rate_est64 rate_est;
+ struct Qdisc *next_sched;
struct sk_buff *gso_skb;
/*
* For performance sake on SMP, we put highly modified fields at the end
@@ -78,25 +84,46 @@ struct Qdisc {
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
- unsigned long __state;
+ unsigned int __state;
struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
- spinlock_t busylock;
+ int padded;
+ atomic_t refcnt;
+
+ spinlock_t busylock ____cacheline_aligned_in_smp;
};
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
- return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
- return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ if (qdisc_is_running(qdisc))
+ return false;
+ qdisc->__state |= __QDISC___STATE_RUNNING;
+ return true;
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+ return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+ set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+ clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
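/* Hedged usage sketch (the real dequeue loop lives in net/sched/sch_generic.c):
 * per the "only changed while qdisc lock is held" note above, a plain
 * read-modify-write of __state is enough to let exactly one CPU own the
 * dequeue path at a time.
 */
static inline void example_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {	/* false if someone is already running it */
		/* ... dequeue and transmit packets here ... */
		qdisc_run_end(q);
	}
}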
struct Qdisc_class_ops {
@@ -158,24 +185,26 @@ struct tcf_result {
};
struct tcf_proto_ops {
- struct tcf_proto_ops *next;
+ struct list_head head;
char kind[IFNAMSIZ];
- int (*classify)(struct sk_buff*, struct tcf_proto*,
- struct tcf_result *);
+ int (*classify)(struct sk_buff *,
+ const struct tcf_proto *,
+ struct tcf_result *);
int (*init)(struct tcf_proto*);
void (*destroy)(struct tcf_proto*);
unsigned long (*get)(struct tcf_proto*, u32 handle);
void (*put)(struct tcf_proto*, unsigned long);
- int (*change)(struct tcf_proto*, unsigned long,
+ int (*change)(struct net *net, struct sk_buff *,
+ struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- unsigned long *);
+ unsigned long *, bool);
int (*delete)(struct tcf_proto*, unsigned long);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
/* rtnetlink specific */
- int (*dump)(struct tcf_proto*, unsigned long,
+ int (*dump)(struct net*, struct tcf_proto*, unsigned long,
struct sk_buff *skb, struct tcmsg*);
struct module *owner;
@@ -185,8 +214,9 @@ struct tcf_proto {
/* Fast access part */
struct tcf_proto *next;
void *root;
- int (*classify)(struct sk_buff*, struct tcf_proto*,
- struct tcf_result *);
+ int (*classify)(struct sk_buff *,
+ const struct tcf_proto *,
+ struct tcf_result *);
__be16 protocol;
/* All the rest */
@@ -194,20 +224,30 @@ struct tcf_proto {
u32 classid;
struct Qdisc *q;
void *data;
- struct tcf_proto_ops *ops;
+ const struct tcf_proto_ops *ops;
};
struct qdisc_skb_cb {
unsigned int pkt_len;
- char data[];
+ u16 slave_dev_queue_mapping;
+ u16 _pad;
+ unsigned char data[20];
};
-static inline int qdisc_qlen(struct Qdisc *q)
+static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+{
+ struct qdisc_skb_cb *qcb;
+
+ BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
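/* Hedged sketch of how a qdisc typically keeps private per-packet state in
 * the cb data[] area (the struct and names here are illustrative): the
 * validate helper above turns an oversized private struct into a build-time
 * error.
 */
struct example_skb_cb {
	u64 time_to_send;
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}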
+
+static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
}
@@ -217,12 +257,12 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
return &qdisc->q.lock;
}
-static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc;
}
-static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
}
@@ -238,7 +278,7 @@ static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
* root. This is enforced by holding the RTNL semaphore, which
* all users of this lock accessor must do.
*/
-static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
@@ -246,7 +286,7 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);
@@ -254,17 +294,17 @@ static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}
-static inline void sch_tree_lock(struct Qdisc *q)
+static inline void sch_tree_lock(const struct Qdisc *q)
{
spin_lock_bh(qdisc_root_sleeping_lock(q));
}
-static inline void sch_tree_unlock(struct Qdisc *q)
+static inline void sch_tree_unlock(const struct Qdisc *q)
{
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
@@ -276,6 +316,7 @@ extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
+extern const struct Qdisc_ops *default_qdisc_ops;
struct Qdisc_class_common {
u32 classid;
@@ -297,43 +338,45 @@ static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
}
static inline struct Qdisc_class_common *
-qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
+qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
struct Qdisc_class_common *cl;
- struct hlist_node *n;
unsigned int h;
h = qdisc_class_hash(id, hash->hashmask);
- hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
+ hlist_for_each_entry(cl, &hash->hash[h], hnode) {
if (cl->classid == id)
return cl;
}
return NULL;
}
-extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
-extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
-extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
-
-extern void dev_init_scheduler(struct net_device *dev);
-extern void dev_shutdown(struct net_device *dev);
-extern void dev_activate(struct net_device *dev);
-extern void dev_deactivate(struct net_device *dev);
-extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
- struct Qdisc *qdisc);
-extern void qdisc_reset(struct Qdisc *qdisc);
-extern void qdisc_destroy(struct Qdisc *qdisc);
-extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
- struct qdisc_size_table *stab);
-extern void tcf_destroy(struct tcf_proto *tp);
-extern void tcf_destroy_chain(struct tcf_proto **fl);
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops, u32 parentid);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
+void tcf_destroy(struct tcf_proto *tp);
+void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs greater than the index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -370,7 +413,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
}
/* Are any of the TX qdiscs changing? */
-static inline bool qdisc_tx_changing(struct net_device *dev)
+static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
@@ -393,7 +436,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
return true;
}
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
return qdisc_skb_cb(skb)->pkt_len;
}
@@ -410,12 +453,20 @@ enum net_xmit_qdisc_t {
#define net_xmit_drop_count(e) (1)
#endif
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
- if (sch->stab)
- qdisc_calculate_pkt_len(skb, sch->stab);
+ struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+ if (stab)
+ __qdisc_calculate_pkt_len(skb, stab);
#endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch);
}
@@ -425,10 +476,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+ const struct sk_buff *skb)
+{
+ bstats->bytes += qdisc_pkt_len(skb);
+ bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
{
- sch->bstats.bytes += len;
- sch->bstats.packets++;
+ bstats_update(&sch->bstats, skb);
}
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -436,7 +495,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- __qdisc_update_bstats(sch, qdisc_pkt_len(skb));
return NET_XMIT_SUCCESS;
}
@@ -451,8 +509,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
{
struct sk_buff *skb = __skb_dequeue(list);
- if (likely(skb != NULL))
+ if (likely(skb != NULL)) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+ }
return skb;
}
@@ -465,10 +525,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct sk_buff_head *list)
{
- struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+ struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
+ sch->qstats.backlog -= len;
kfree_skb(skb);
return len;
}
@@ -610,11 +671,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
{
struct sk_buff *n;
- if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
- !skb_shared(skb))
- n = skb_get(skb);
- else
- n = skb_clone(skb, gfp_mask);
+ n = skb_clone(skb, gfp_mask);
if (n) {
n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
@@ -625,4 +682,42 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
}
#endif
+struct psched_ratecfg {
+ u64 rate_bytes_ps; /* bytes per second */
+ u32 mult;
+ u16 overhead;
+ u8 linklayer;
+ u8 shift;
+};
+
+static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
+ unsigned int len)
+{
+ len += r->overhead;
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+ return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+ return ((u64)len * r->mult) >> r->shift;
+}
+
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf,
+ u64 rate64);
+
+static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
+ const struct psched_ratecfg *r)
+{
+ memset(res, 0, sizeof(*res));
+
+ /* The legacy struct tc_ratespec has a 32bit @rate field;
+ * Qdiscs using a 64bit rate should add new attributes
+ * in order to maintain compatibility.
+ */
+ res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
+ res->overhead = r->overhead;
+ res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
+}
+
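/* Hedged example: precomputing a rate and converting a packet length into a
 * transmit time, as a shaper might do; the 1 Mbit/s figure is illustrative
 * only.
 */
static u64 example_pkt_time_ns(unsigned int pkt_len)
{
	struct tc_ratespec conf = { .rate = 125000 };	/* bytes/s == 1 Mbit/s */
	struct psched_ratecfg rate;

	psched_ratecfg_precompute(&rate, &conf, 0);
	return psched_l2t_ns(&rate, pkt_len);	/* ns needed on the wire */
}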
#endif
diff --git a/include/net/scm.h b/include/net/scm.h
index 745460fa2f0..262532d111f 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -12,8 +12,13 @@
*/
#define SCM_MAX_FD 253
+struct scm_creds {
+ u32 pid;
+ kuid_t uid;
+ kgid_t gid;
+};
+
struct scm_fp_list {
- struct list_head list;
short count;
short max;
struct file *fp[SCM_MAX_FD];
@@ -21,19 +26,18 @@ struct scm_fp_list {
struct scm_cookie {
struct pid *pid; /* Skb credentials */
- const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
- struct ucred creds; /* Skb credentials */
+ struct scm_creds creds; /* Skb credentials */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Passed security ID */
#endif
};
-extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
-extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
-extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
-extern void __scm_destroy(struct scm_cookie *scm);
-extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+void __scm_destroy(struct scm_cookie *scm);
+struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);
#ifdef CONFIG_SECURITY_NETWORK
static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
@@ -46,35 +50,35 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co
#endif /* CONFIG_SECURITY_NETWORK */
static __inline__ void scm_set_cred(struct scm_cookie *scm,
- struct pid *pid, const struct cred *cred)
+ struct pid *pid, kuid_t uid, kgid_t gid)
{
scm->pid = get_pid(pid);
- scm->cred = get_cred(cred);
- cred_to_ucred(pid, cred, &scm->creds);
+ scm->creds.pid = pid_vnr(pid);
+ scm->creds.uid = uid;
+ scm->creds.gid = gid;
}
static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
{
put_pid(scm->pid);
scm->pid = NULL;
-
- if (scm->cred)
- put_cred(scm->cred);
- scm->cred = NULL;
}
static __inline__ void scm_destroy(struct scm_cookie *scm)
{
scm_destroy_cred(scm);
- if (scm && scm->fp)
+ if (scm->fp)
__scm_destroy(scm);
}
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm)
+ struct scm_cookie *scm, bool forcecreds)
{
- scm_set_cred(scm, task_tgid(current), current_cred());
- scm->fp = NULL;
+ memset(scm, 0, sizeof(*scm));
+ scm->creds.uid = INVALID_UID;
+ scm->creds.gid = INVALID_GID;
+ if (forcecreds)
+ scm_set_cred(scm, task_tgid(current), current_uid(), current_gid());
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
return 0;
@@ -112,8 +116,15 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
return;
}
- if (test_bit(SOCK_PASSCRED, &sock->flags))
- put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+ if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+ struct user_namespace *current_ns = current_user_ns();
+ struct ucred ucreds = {
+ .pid = scm->creds.pid,
+ .uid = from_kuid_munged(current_ns, scm->creds.uid),
+ .gid = from_kgid_munged(current_ns, scm->creds.gid),
+ };
+ put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
+ }
scm_destroy_cred(scm);
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index 49bc9577c61..f2d58aa37a6 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -16,22 +16,15 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_auth_h__
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index befc8d2a1b9..4a5b9a306c6 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -19,16 +19,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Dinakaran Joseph
@@ -37,47 +33,46 @@
*
* Rewritten to use libcrc32c by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
+#ifndef __sctp_checksum_h__
+#define __sctp_checksum_h__
+
#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <linux/crc32c.h>
+#include <linux/crc32.h>
-static inline __u32 sctp_crc32c(__u32 crc, u8 *buffer, u16 length)
+static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
{
- return crc32c(crc, buffer, length);
-}
-
-static inline __u32 sctp_start_cksum(__u8 *buffer, __u16 length)
-{
- __u32 crc = ~(__u32)0;
- __u8 zero[sizeof(__u32)] = {0};
-
- /* Optimize this routine to be SCTP specific, knowing how
- * to skip the checksum field of the SCTP header.
+ /* This uses the crypto implementation of crc32c, which is either
+ * implemented with hardware support or resolves to __crc32c_le().
*/
-
- /* Calculate CRC up to the checksum. */
- crc = sctp_crc32c(crc, buffer, sizeof(struct sctphdr) - sizeof(__u32));
-
- /* Skip checksum field of the header. */
- crc = sctp_crc32c(crc, zero, sizeof(__u32));
-
- /* Calculate the rest of the CRC. */
- crc = sctp_crc32c(crc, &buffer[sizeof(struct sctphdr)],
- length - sizeof(struct sctphdr));
- return crc;
+ return crc32c(sum, buff, len);
}
-static inline __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
+static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
+ int offset, int len)
{
- return sctp_crc32c(crc32, buffer, length);
+ return __crc32c_le_combine(csum, csum2, len);
}
-static inline __le32 sctp_end_cksum(__be32 crc32)
+static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
{
- return cpu_to_le32(~crc32);
+ struct sctphdr *sh = sctp_hdr(skb);
+ __le32 ret, old = sh->checksum;
+ const struct skb_checksum_ops ops = {
+ .update = sctp_csum_update,
+ .combine = sctp_csum_combine,
+ };
+
+ sh->checksum = 0;
+ ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
+ ~(__u32)0, &ops));
+ sh->checksum = old;
+
+ return ret;
}
+
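/* Hedged usage sketch: a receive path verifying the CRC32c of an SCTP packet
 * with the helper above; 'offset' is assumed to be the offset of the SCTP
 * header within the skb.
 */
static inline bool example_sctp_csum_ok(struct sk_buff *skb, unsigned int offset)
{
	struct sctphdr *sh = sctp_hdr(skb);

	/* sctp_compute_cksum() saves and restores sh->checksum itself */
	return sctp_compute_cksum(skb, offset) == sh->checksum;
}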
+#endif /* __sctp_checksum_h__ */
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index c01dc99def0..4b7cd695e43 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -19,23 +19,20 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
- * Ardelle Fan <ardelle.fan@intel.com>
- * Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
*/
-
#ifndef __net_sctp_command_h__
#define __net_sctp_command_h__
@@ -63,6 +60,7 @@ typedef enum {
SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */
SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */
SCTP_CMD_TIMER_START, /* Start a timer. */
+ SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
SCTP_CMD_TIMER_STOP, /* Stop a timer. */
SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
@@ -73,7 +71,6 @@ typedef enum {
SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
- SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timers. */
SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
@@ -108,6 +105,8 @@ typedef enum {
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
+ SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+ SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
} sctp_verb_t;
@@ -128,8 +127,6 @@ typedef union {
__be16 err;
sctp_state_t state;
sctp_event_timeout_t to;
- unsigned long zero;
- void *ptr;
struct sctp_chunk *chunk;
struct sctp_association *asoc;
struct sctp_transport *transport;
@@ -152,23 +149,15 @@ typedef union {
* which takes an __s32 and returns a sctp_arg_t containing the
* __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg.
*/
-static inline sctp_arg_t SCTP_NULL(void)
-{
- sctp_arg_t retval; retval.ptr = NULL; return retval;
-}
-static inline sctp_arg_t SCTP_NOFORCE(void)
-{
- sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 0; return retval;
-}
-static inline sctp_arg_t SCTP_FORCE(void)
-{
- sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 1; return retval;
-}
#define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
static inline sctp_arg_t \
SCTP_## name (type arg) \
-{ sctp_arg_t retval = {.zero = 0UL}; retval.elt = arg; return retval; }
+{ sctp_arg_t retval;\
+ memset(&retval, 0, sizeof(sctp_arg_t));\
+ retval.elt = arg;\
+ return retval;\
+}
SCTP_ARG_CONSTRUCTOR(I32, __s32, i32)
SCTP_ARG_CONSTRUCTOR(U32, __u32, u32)
@@ -179,7 +168,6 @@ SCTP_ARG_CONSTRUCTOR(ERROR, int, error)
SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */
SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state)
SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to)
-SCTP_ARG_CONSTRUCTOR(PTR, void *, ptr)
SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk)
SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc)
SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
@@ -190,6 +178,23 @@ SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet)
SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)
SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg)
+static inline sctp_arg_t SCTP_FORCE(void)
+{
+ return SCTP_I32(1);
+}
+
+static inline sctp_arg_t SCTP_NOFORCE(void)
+{
+ return SCTP_I32(0);
+}
+
+static inline sctp_arg_t SCTP_NULL(void)
+{
+ sctp_arg_t retval;
+ memset(&retval, 0, sizeof(sctp_arg_t));
+ return retval;
+}
+
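For reference, a minimal sketch of how the rewritten constructors are meant to be used when queueing side-effect commands (the helper name start_t1_init_timer is illustrative; sctp_add_cmd_sf(), SCTP_CMD_GEN_SACK and SCTP_EVENT_TIMEOUT_T1_INIT come from the existing command/constants headers):

static void start_t1_init_timer(sctp_cmd_seq_t *commands)
{
        /* Each SCTP_*() constructor now memsets the whole union before
         * filling in one member, so no stale bytes reach the command queue. */
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));

        /* SCTP_FORCE()/SCTP_NOFORCE() are now plain SCTP_I32(1)/SCTP_I32(0),
         * and SCTP_NULL() is an all-zero argument. */
        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
}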
typedef struct {
sctp_arg_t obj;
sctp_verb_t verb;
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index c70d8ccc55c..307728f622e 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -19,16 +19,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -39,9 +35,6 @@
* Xingang Guo <xingang.guo@intel.com>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_constants_h__
@@ -49,7 +42,6 @@
#include <linux/sctp.h>
#include <linux/ipv6.h> /* For ipv6hdr. */
-#include <net/sctp/user.h>
#include <net/tcp_states.h> /* For TCP states used in sctp_sock_state_t */
/* Value used for stream negotiation. */
@@ -150,7 +142,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, sctp_event_other_t, other)
SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
-#define sctp_chunk_is_control(a) (a->chunk_hdr->type != SCTP_CID_DATA)
#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
/* Calculate the actual data size in a data chunk */
@@ -188,15 +179,14 @@ typedef enum {
/* SCTP state defines for internal state machine */
typedef enum {
- SCTP_STATE_EMPTY = 0,
- SCTP_STATE_CLOSED = 1,
- SCTP_STATE_COOKIE_WAIT = 2,
- SCTP_STATE_COOKIE_ECHOED = 3,
- SCTP_STATE_ESTABLISHED = 4,
- SCTP_STATE_SHUTDOWN_PENDING = 5,
- SCTP_STATE_SHUTDOWN_SENT = 6,
- SCTP_STATE_SHUTDOWN_RECEIVED = 7,
- SCTP_STATE_SHUTDOWN_ACK_SENT = 8,
+ SCTP_STATE_CLOSED = 0,
+ SCTP_STATE_COOKIE_WAIT = 1,
+ SCTP_STATE_COOKIE_ECHOED = 2,
+ SCTP_STATE_ESTABLISHED = 3,
+ SCTP_STATE_SHUTDOWN_PENDING = 4,
+ SCTP_STATE_SHUTDOWN_SENT = 5,
+ SCTP_STATE_SHUTDOWN_RECEIVED = 6,
+ SCTP_STATE_SHUTDOWN_ACK_SENT = 7,
} sctp_state_t;
@@ -305,7 +295,7 @@ enum { SCTP_MAX_GABS = 16 };
* to which we will raise the P-MTU.
*/
#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
-#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */
+
#define SCTP_SECRET_SIZE 32 /* Number of octets in a 256 bits. */
#define SCTP_SIGNATURE_SIZE 20 /* size of a SLA-1 signature */
@@ -314,14 +304,6 @@ enum { SCTP_MAX_GABS = 16 };
* functions simpler to write.
*/
-#if defined (CONFIG_SCTP_HMAC_MD5)
-#define SCTP_COOKIE_HMAC_ALG "hmac(md5)"
-#elif defined (CONFIG_SCTP_HMAC_SHA1)
-#define SCTP_COOKIE_HMAC_ALG "hmac(sha1)"
-#else
-#define SCTP_COOKIE_HMAC_ALG NULL
-#endif
-
/* These return values describe the success or failure of a number of
* routines which form the lower interface to SCTP_outqueue.
*/
@@ -336,6 +318,7 @@ typedef enum {
typedef enum {
SCTP_TRANSPORT_UP,
SCTP_TRANSPORT_DOWN,
+ SCTP_TRANSPORT_PF,
} sctp_transport_cmd_t;
/* These are the address scopes defined mainly for IPv4 addresses
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 505845ddb0b..8e4de46c052 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -21,16 +21,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +37,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __net_sctp_h__
@@ -71,7 +64,7 @@
#include <linux/jiffies.h>
#include <linux/idr.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_route.h>
#endif
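IS_ENABLED(CONFIG_IPV6) is true for both built-in and modular IPv6, so the old two-symbol test collapses into one; the idiom, as a minimal sketch:

#if IS_ENABLED(CONFIG_IPV6)     /* covers CONFIG_IPV6=y and CONFIG_IPV6=m */
        /* IPv6-only declarations */
#endif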
@@ -83,30 +76,12 @@
#include <net/sctp/structs.h>
#include <net/sctp/constants.h>
-
-/* Set SCTP_DEBUG flag via config if not already set. */
-#ifndef SCTP_DEBUG
-#ifdef CONFIG_SCTP_DBG_MSG
-#define SCTP_DEBUG 1
-#else
-#define SCTP_DEBUG 0
-#endif /* CONFIG_SCTP_DBG */
-#endif /* SCTP_DEBUG */
-
#ifdef CONFIG_IP_SCTP_MODULE
#define SCTP_PROTOSW_FLAG 0
#else /* static! */
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
-
-/* Certain internal static functions need to be exported when
- * compiled into the test frame.
- */
-#ifndef SCTP_STATIC
-#define SCTP_STATIC static
-#endif
-
/*
* Function declarations.
*/
@@ -114,13 +89,11 @@
/*
* sctp/protocol.c
*/
-extern struct sock *sctp_get_ctl_sock(void);
-extern void sctp_local_addr_free(struct rcu_head *head);
-extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
- sctp_scope_t, gfp_t gfp,
- int flags);
-extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
-extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
+int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
+ sctp_scope_t, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
/*
* sctp/socket.c
@@ -128,23 +101,24 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
-void sctp_data_ready(struct sock *sk, int len);
+void sctp_data_ready(struct sock *sk);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
extern struct percpu_counter sctp_sockets_allocated;
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
/*
* sctp/primitive.c
*/
-int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
-int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
-int sctp_primitive_ABORT(struct sctp_association *, void *arg);
-int sctp_primitive_SEND(struct sctp_association *, void *arg);
-int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);
-int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
/*
* sctp/input.c
@@ -155,12 +129,14 @@ void sctp_hash_established(struct sctp_association *);
void sctp_unhash_established(struct sctp_association *);
void sctp_hash_endpoint(struct sctp_endpoint *);
void sctp_unhash_endpoint(struct sctp_endpoint *);
-struct sock *sctp_err_lookup(int family, struct sk_buff *,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
struct sctp_transport **);
void sctp_err_finish(struct sock *, struct sctp_association *);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
+void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
+ struct sk_buff *);
void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_association *asoc,
struct sctp_transport *t);
@@ -170,14 +146,14 @@ void sctp_backlog_migrate(struct sctp_association *assoc,
/*
* sctp/proc.c
*/
-int sctp_snmp_proc_init(void);
-void sctp_snmp_proc_exit(void);
-int sctp_eps_proc_init(void);
-void sctp_eps_proc_exit(void);
-int sctp_assocs_proc_init(void);
-void sctp_assocs_proc_exit(void);
-int sctp_remaddr_proc_init(void);
-void sctp_remaddr_proc_exit(void);
+int sctp_snmp_proc_init(struct net *net);
+void sctp_snmp_proc_exit(struct net *net);
+int sctp_eps_proc_init(struct net *net);
+void sctp_eps_proc_exit(struct net *net);
+int sctp_assocs_proc_init(struct net *net);
+void sctp_assocs_proc_exit(struct net *net);
+int sctp_remaddr_proc_init(struct net *net);
+void sctp_remaddr_proc_exit(struct net *net);
/*
@@ -194,38 +170,11 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
* Section: Macros, externs, and inlines
*/
-
-#ifdef TEST_FRAME
-#include <test_frame.h>
-#else
-
-/* spin lock wrappers. */
-#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
-#define sctp_spin_unlock_irqrestore(lock, flags) \
- spin_unlock_irqrestore(lock, flags)
-#define sctp_local_bh_disable() local_bh_disable()
-#define sctp_local_bh_enable() local_bh_enable()
-#define sctp_spin_lock(lock) spin_lock(lock)
-#define sctp_spin_unlock(lock) spin_unlock(lock)
-#define sctp_write_lock(lock) write_lock(lock)
-#define sctp_write_unlock(lock) write_unlock(lock)
-#define sctp_read_lock(lock) read_lock(lock)
-#define sctp_read_unlock(lock) read_unlock(lock)
-
-/* sock lock wrappers. */
-#define sctp_lock_sock(sk) lock_sock(sk)
-#define sctp_release_sock(sk) release_sock(sk)
-#define sctp_bh_lock_sock(sk) bh_lock_sock(sk)
-#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk)
-
/* SCTP SNMP MIB stats handlers */
-DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
-#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field)
-#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field)
-#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
-#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field)
-
-#endif /* !TEST_FRAME */
+#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
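With the MIB now hanging off struct net, every counter bump needs the owning namespace; a hypothetical call site, assuming SCTP_MIB_OUTCTRLCHUNKS from the sctp mib enum that follows:

        /* old: SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); */
        SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTCTRLCHUNKS);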
/* sctp mib definitions */
enum {
@@ -271,60 +220,18 @@ struct sctp_mib {
unsigned long mibs[SCTP_MIB_MAX];
};
-
-/* Print debugging messages. */
-#if SCTP_DEBUG
-extern int sctp_debug_flag;
-#define SCTP_DEBUG_PRINTK(fmt, args...) \
-do { \
- if (sctp_debug_flag) \
- printk(KERN_DEBUG pr_fmt(fmt), ##args); \
-} while (0)
-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) \
-do { \
- if (sctp_debug_flag) \
- pr_cont(fmt, ##args); \
-} while (0)
-#define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail, \
- args_lead, saddr, args_trail...) \
-do { \
- if (sctp_debug_flag) { \
- if (saddr->sa.sa_family == AF_INET6) { \
- printk(KERN_DEBUG \
- pr_fmt(fmt_lead "%pI6" fmt_trail), \
- args_lead, \
- &saddr->v6.sin6_addr, \
- args_trail); \
- } else { \
- printk(KERN_DEBUG \
- pr_fmt(fmt_lead "%pI4" fmt_trail), \
- args_lead, \
- &saddr->v4.sin_addr.s_addr, \
- args_trail); \
- } \
- } \
-} while (0)
-#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; }
-#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; }
-
-#define SCTP_ASSERT(expr, str, func) \
- if (!(expr)) { \
- SCTP_DEBUG_PRINTK("Assertion Failed: %s(%s) at %s:%s:%d\n", \
- str, (#expr), __FILE__, __func__, __LINE__); \
- func; \
+/* helper function to track stats about max rto and related transport */
+static inline void sctp_max_rto(struct sctp_association *asoc,
+ struct sctp_transport *trans)
+{
+ if (asoc->stats.max_obs_rto < (__u64)trans->rto) {
+ asoc->stats.max_obs_rto = trans->rto;
+ memset(&asoc->stats.obs_rto_ipaddr, 0,
+ sizeof(struct sockaddr_storage));
+ memcpy(&asoc->stats.obs_rto_ipaddr, &trans->ipaddr,
+ trans->af_specific->sockaddr_len);
}
-
-#else /* SCTP_DEBUG */
-
-#define SCTP_DEBUG_PRINTK(whatever...)
-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
-#define SCTP_ENABLE_DEBUG
-#define SCTP_DISABLE_DEBUG
-#define SCTP_ASSERT(expr, str, func)
-
-#endif /* SCTP_DEBUG */
-
+}
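A hypothetical caller would invoke the helper wherever a transport's RTO has just been recomputed, so the association-level maximum stays current:

        /* after t->rto has been updated for this transport: */
        sctp_max_rto(t->asoc, t);
        /* t->asoc->stats.max_obs_rto now holds the largest RTO observed and
         * stats.obs_rto_ipaddr the address of the transport that produced it. */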
/*
* Macros for keeping a global reference of object allocations.
@@ -357,31 +264,35 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
#define SCTP_DBG_OBJCNT_ENTRY(name) \
{.label= #name, .counter= &sctp_dbg_objcnt_## name}
-void sctp_dbg_objcnt_init(void);
-void sctp_dbg_objcnt_exit(void);
+void sctp_dbg_objcnt_init(struct net *);
+void sctp_dbg_objcnt_exit(struct net *);
#else
#define SCTP_DBG_OBJCNT_INC(name)
#define SCTP_DBG_OBJCNT_DEC(name)
-static inline void sctp_dbg_objcnt_init(void) { return; }
-static inline void sctp_dbg_objcnt_exit(void) { return; }
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
#endif /* CONFIG_SCTP_DBG_OBJCOUNT */
#if defined CONFIG_SYSCTL
void sctp_sysctl_register(void);
void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
#else
static inline void sctp_sysctl_register(void) { return; }
static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
#endif
/* Size of Supported Address Parameter for 'x' address types. */
#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
void sctp_v6_pf_init(void);
void sctp_v6_pf_exit(void);
@@ -411,6 +322,7 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
/* A macro to walk a list of skbs. */
#define sctp_skb_for_each(pos, head, tmp) \
@@ -422,13 +334,13 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
{
unsigned long flags;
- sctp_spin_lock_irqsave(&head->lock, flags);
- sctp_spin_lock(&list->lock);
+ spin_lock_irqsave(&head->lock, flags);
+ spin_lock(&list->lock);
skb_queue_splice_tail_init(list, head);
- sctp_spin_unlock(&list->lock);
- sctp_spin_unlock_irqrestore(&head->lock, flags);
+ spin_unlock(&list->lock);
+ spin_unlock_irqrestore(&head->lock, flags);
}
/**
@@ -514,10 +426,10 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
return frag;
}
-static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
{
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sk, asoc);
asoc->pmtu_pending = 0;
}
@@ -531,7 +443,6 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
- pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
pos.v += WORD_ROUND(ntohs(pos.p->length)))
@@ -542,7 +453,6 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
- (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
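sctp_walk_params()/sctp_walk_errors() advance a union sctp_params (or sctp_errhdr_t) cursor across a chunk's variable-length TLVs, with the loop condition guaranteeing each header fits inside the chunk. A sketch of the usual pattern (dump_init_params and the pr_debug output are illustrative):

static void dump_init_params(sctp_init_chunk_t *peer_init)
{
        union sctp_params param;

        sctp_walk_params(param, peer_init, init_hdr.params) {
                pr_debug("param type %u, len %u\n",
                         ntohs(param.p->type), ntohs(param.p->length));
        }
}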
@@ -558,32 +468,10 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
/* Round an int up to the next multiple of 4. */
#define WORD_ROUND(s) (((s)+3)&~3)
-/* Make a new instance of type. */
-#define t_new(type, flags) (type *)kzalloc(sizeof(type), flags)
-
-/* Compare two timevals. */
-#define tv_lt(s, t) \
- (s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec))
-
-/* Add tv1 to tv2. */
-#define TIMEVAL_ADD(tv1, tv2) \
-({ \
- suseconds_t usecs = (tv2).tv_usec + (tv1).tv_usec; \
- time_t secs = (tv2).tv_sec + (tv1).tv_sec; \
-\
- if (usecs >= 1000000) { \
- usecs -= 1000000; \
- secs++; \
- } \
- (tv2).tv_sec = secs; \
- (tv2).tv_usec = usecs; \
-})
-
/* External references. */
extern struct proto sctp_prot;
extern struct proto sctpv6_prot;
-extern struct proc_dir_entry *proc_net_sctp;
void sctp_put_port(struct sock *sk);
extern struct idr sctp_assocs_id;
@@ -601,7 +489,7 @@ static inline int ipver2af(__u8 ipver)
return AF_INET6;
default:
return 0;
- };
+ }
}
/* Convert from an address parameter type to an address family. */
@@ -614,36 +502,26 @@ static inline int param_type2af(__be16 type)
return AF_INET6;
default:
return 0;
- };
-}
-
-/* Perform some sanity checks. */
-static inline int sctp_sanity_check(void)
-{
- SCTP_ASSERT(sizeof(struct sctp_ulpevent) <=
- sizeof(((struct sk_buff *)0)->cb),
- "SCTP: ulpevent does not fit in skb!\n", return 0);
-
- return 1;
+ }
}
/* Warning: The following hash functions assume a power of two 'size'. */
/* This is the hash function for the SCTP port hash table. */
-static inline int sctp_phashfn(__u16 lport)
+static inline int sctp_phashfn(struct net *net, __u16 lport)
{
- return lport & (sctp_port_hashsize - 1);
+ return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
}
/* This is the hash function for the endpoint hash table. */
-static inline int sctp_ep_hashfn(__u16 lport)
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
{
- return lport & (sctp_ep_hashsize - 1);
+ return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
}
/* This is the hash function for the association hash table. */
-static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
+static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
{
- int h = (lport << 16) + rport;
+ int h = (lport << 16) + rport + net_hash_mix(net);
h ^= h>>8;
return h & (sctp_assoc_hashsize - 1);
}
@@ -659,8 +537,8 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
return h & (sctp_assoc_hashsize - 1);
}
-#define sctp_for_each_hentry(epb, node, head) \
- hlist_for_each_entry(epb, node, head, node)
+#define sctp_for_each_hentry(epb, head) \
+ hlist_for_each_entry(epb, head, node)
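hlist_for_each_entry() no longer takes a separate node cursor, so the wrapper drops it as well; a sketch of walking one endpoint bucket (scan_ep_bucket is illustrative; the bucket's rwlock and chain members are assumed from struct sctp_hashbucket in structs.h):

static void scan_ep_bucket(struct net *net, __u16 lport)
{
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;

        head = &sctp_ep_hashtable[sctp_ep_hashfn(net, lport)];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, &head->chain)
                pr_debug("endpoint bound to port %u\n", lport);
        read_unlock(&head->lock);
}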
/* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
@@ -703,4 +581,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
}
+/* The cookie is always 0 since this is how it's used in the
+ * pmtu code.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+ if (t->dst && !dst_check(t->dst, t->dst_cookie)) {
+ dst_release(t->dst);
+ t->dst = NULL;
+ }
+
+ return t->dst;
+}
+
#endif /* __net_sctp_h__ */
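A hypothetical transmit-path caller would revalidate the cached route through sctp_transport_dst_check() and re-route only when the check drops it (get_tx_route is an illustrative name; sctp_transport_route() is declared in structs.h):

static struct dst_entry *get_tx_route(struct sctp_transport *t, struct sock *sk)
{
        struct dst_entry *dst = sctp_transport_dst_check(t);

        if (!dst) {
                /* dst_check() failed and the stale reference was dropped;
                 * look the route up again for this transport. */
                sctp_transport_route(t, NULL, sctp_sk(sk));
                dst = t->dst;
        }
        return dst;
}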
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 9352d12f02d..7f4eeb340a5 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -21,16 +21,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +38,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -77,7 +70,8 @@ typedef struct {
int action;
} sctp_sm_command_t;
-typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
+ const struct sctp_endpoint *,
const struct sctp_association *,
const sctp_subtype_t type,
void *arg,
@@ -165,6 +159,7 @@ sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
sctp_state_fn_t sctp_sf_do_prm_asconf;
/* Prototypes for other event state functions. */
+sctp_state_fn_t sctp_sf_do_no_pending_tsn;
sctp_state_fn_t sctp_sf_do_9_2_start_shutdown;
sctp_state_fn_t sctp_sf_do_9_2_shutdown_ack;
sctp_state_fn_t sctp_sf_ignore_other;
@@ -177,7 +172,8 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
/* Prototypes for utility support functions. */
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
+ sctp_event_t,
sctp_state_t,
sctp_subtype_t);
int sctp_chunk_iif(const struct sctp_chunk *);
@@ -231,10 +227,10 @@ struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *,
const struct sctp_chunk *,
struct sctp_paramhdr *);
+struct sctp_chunk *sctp_make_violation_max_retrans(const struct sctp_association *,
+ const struct sctp_chunk *);
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *,
- const struct sctp_transport *,
- const void *payload,
- const size_t paylen);
+ const struct sctp_transport *);
struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *,
const struct sctp_chunk *,
const void *payload,
@@ -269,7 +265,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *);
/* Prototypes for statetable processing. */
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
sctp_state_t state,
struct sctp_endpoint *,
struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index cc9185ca8fd..f38588bf346 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -19,16 +19,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Randall Stewart <randall@sctp.chicago.il.us>
@@ -46,21 +42,18 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_structs_h__
#define __sctp_structs_h__
-#include <linux/time.h> /* We get struct timespec. */
+#include <linux/ktime.h>
#include <linux/socket.h> /* linux/in.h needs this!! */
#include <linux/in.h> /* We get struct sockaddr_in. */
#include <linux/in6.h> /* We get struct in6_addr */
#include <linux/ipv6.h>
#include <asm/param.h> /* We get MAXHOSTNAMELEN. */
-#include <asm/atomic.h> /* This gets us atomic counters. */
+#include <linux/atomic.h> /* This gets us atomic counters. */
#include <linux/skbuff.h> /* We need sk_buff_head. */
#include <linux/workqueue.h> /* We need tq_struct. */
#include <linux/sctp.h> /* We need sctp* header structs. */
@@ -102,6 +95,7 @@ struct sctp_bind_bucket {
unsigned short fastreuse;
struct hlist_node node;
struct hlist_head owner;
+ struct net *net;
};
struct sctp_bind_hashbucket {
@@ -118,141 +112,32 @@ struct sctp_hashbucket {
/* The SCTP globals structure. */
extern struct sctp_globals {
- /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
- *
- * The following protocol parameters are RECOMMENDED:
- *
- * RTO.Initial - 3 seconds
- * RTO.Min - 1 second
- * RTO.Max - 60 seconds
- * RTO.Alpha - 1/8 (3 when converted to right shifts.)
- * RTO.Beta - 1/4 (2 when converted to right shifts.)
- */
- unsigned int rto_initial;
- unsigned int rto_min;
- unsigned int rto_max;
-
- /* Note: rto_alpha and rto_beta are really defined as inverse
- * powers of two to facilitate integer operations.
- */
- int rto_alpha;
- int rto_beta;
-
- /* Max.Burst - 4 */
- int max_burst;
-
- /* Whether Cookie Preservative is enabled(1) or not(0) */
- int cookie_preserve_enable;
-
- /* Valid.Cookie.Life - 60 seconds */
- unsigned int valid_cookie_life;
-
- /* Delayed SACK timeout 200ms default*/
- unsigned int sack_timeout;
-
- /* HB.interval - 30 seconds */
- unsigned int hb_interval;
-
- /* Association.Max.Retrans - 10 attempts
- * Path.Max.Retrans - 5 attempts (per destination address)
- * Max.Init.Retransmits - 8 attempts
- */
- int max_retrans_association;
- int max_retrans_path;
- int max_retrans_init;
-
- /*
- * Policy for preforming sctp/socket accounting
- * 0 - do socket level accounting, all assocs share sk_sndbuf
- * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
- */
- int sndbuf_policy;
-
- /*
- * Policy for preforming sctp/socket accounting
- * 0 - do socket level accounting, all assocs share sk_rcvbuf
- * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
- */
- int rcvbuf_policy;
-
- /* The following variables are implementation specific. */
-
- /* Default initialization values to be applied to new associations. */
- __u16 max_instreams;
- __u16 max_outstreams;
-
/* This is a list of groups of functions for each address
* family that we support.
*/
struct list_head address_families;
/* This is the hash of all endpoints. */
- int ep_hashsize;
struct sctp_hashbucket *ep_hashtable;
-
/* This is the hash of all associations. */
- int assoc_hashsize;
struct sctp_hashbucket *assoc_hashtable;
-
/* This is the sctp port control hash. */
- int port_hashsize;
struct sctp_bind_hashbucket *port_hashtable;
- /* This is the global local address list.
- * We actively maintain this complete list of addresses on
- * the system by catching address add/delete events.
- *
- * It is a list of sctp_sockaddr_entry.
- */
- struct list_head local_addr_list;
-
- /* Lock that protects the local_addr_list writers */
- spinlock_t addr_list_lock;
-
- /* Flag to indicate if addip is enabled. */
- int addip_enable;
- int addip_noauth_enable;
-
- /* Flag to indicate if PR-SCTP is enabled. */
- int prsctp_enable;
-
- /* Flag to idicate if SCTP-AUTH is enabled */
- int auth_enable;
+ /* Sizes of above hashtables. */
+ int ep_hashsize;
+ int assoc_hashsize;
+ int port_hashsize;
- /*
- * Policy to control SCTP IPv4 address scoping
- * 0 - Disable IPv4 address scoping
- * 1 - Enable IPv4 address scoping
- * 2 - Selectively allow only IPv4 private addresses
- * 3 - Selectively allow only IPv4 link local address
- */
- int ipv4_scope_policy;
+ /* Default initialization values to be applied to new associations. */
+ __u16 max_instreams;
+ __u16 max_outstreams;
/* Flag to indicate whether computing and verifying checksum
* is disabled. */
- int checksum_disable;
-
- /* Threshold for rwnd update SACKS. Receive buffer shifted this many
- * bits is an indicator of when to send and window update SACK.
- */
- int rwnd_update_shift;
+ bool checksum_disable;
} sctp_globals;
-#define sctp_rto_initial (sctp_globals.rto_initial)
-#define sctp_rto_min (sctp_globals.rto_min)
-#define sctp_rto_max (sctp_globals.rto_max)
-#define sctp_rto_alpha (sctp_globals.rto_alpha)
-#define sctp_rto_beta (sctp_globals.rto_beta)
-#define sctp_max_burst (sctp_globals.max_burst)
-#define sctp_valid_cookie_life (sctp_globals.valid_cookie_life)
-#define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable)
-#define sctp_max_retrans_association (sctp_globals.max_retrans_association)
-#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
-#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy)
-#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
-#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
-#define sctp_sack_timeout (sctp_globals.sack_timeout)
-#define sctp_hb_interval (sctp_globals.hb_interval)
#define sctp_max_instreams (sctp_globals.max_instreams)
#define sctp_max_outstreams (sctp_globals.max_outstreams)
#define sctp_address_families (sctp_globals.address_families)
@@ -262,15 +147,7 @@ extern struct sctp_globals {
#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
#define sctp_port_hashsize (sctp_globals.port_hashsize)
#define sctp_port_hashtable (sctp_globals.port_hashtable)
-#define sctp_local_addr_list (sctp_globals.local_addr_list)
-#define sctp_local_addr_lock (sctp_globals.addr_list_lock)
-#define sctp_scope_policy (sctp_globals.ipv4_scope_policy)
-#define sctp_addip_enable (sctp_globals.addip_enable)
-#define sctp_addip_noauth (sctp_globals.addip_noauth_enable)
-#define sctp_prsctp_enable (sctp_globals.prsctp_enable)
-#define sctp_auth_enable (sctp_globals.auth_enable)
#define sctp_checksum_disable (sctp_globals.checksum_disable)
-#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
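The tunables removed here move into per-namespace state, so code that used the old globals reads them through the owning struct net instead; a minimal sketch, assuming the fields keep the same names under net->sctp (as in the per-netns SCTP state):

static void show_rto_tunables(struct sock *sk)
{
        struct net *net = sock_net(sk);

        /* replaces the old sctp_rto_initial/sctp_rto_min/sctp_rto_max */
        pr_debug("rto_initial=%u rto_min=%u rto_max=%u (ms)\n",
                 net->sctp.rto_initial, net->sctp.rto_min, net->sctp.rto_max);
}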
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
@@ -291,6 +168,7 @@ struct sctp_sock {
/* Access to HMAC transform. */
struct crypto_hash *hmac;
+ char *sctp_hmac_alg;
/* What is our base endpointer? */
struct sctp_endpoint *ep;
@@ -341,6 +219,8 @@ struct sctp_sock {
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
+ struct list_head auto_asconf_list;
+ int do_auto_asconf;
};
static inline struct sctp_sock *sctp_sk(const struct sock *sk)
@@ -353,7 +233,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
return (struct sock *)sp;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct sctp6_sock {
struct sctp_sock sctp;
struct ipv6_pinfo inet6;
@@ -395,7 +275,7 @@ struct sctp_cookie {
__u32 peer_ttag;
/* When does this cookie expire? */
- struct timeval expiration;
+ ktime_t expiration;
/* Number of inbound/outbound streams which are set
* and negotiated during the INIT process.
@@ -422,7 +302,7 @@ struct sctp_cookie {
__u32 adaptation_ind;
__u8 auth_random[sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH];
- __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS + 2];
+ __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS * sizeof(__u16) + 2];
__u8 auth_chunks[sizeof(sctp_paramhdr_t) + SCTP_AUTH_MAX_CHUNKS];
/* This is a shim for my peer's INIT packet, followed by
@@ -510,7 +390,6 @@ struct sctp_stream {
struct sctp_ssnmap {
struct sctp_stream in;
struct sctp_stream out;
- int malloced;
};
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
@@ -564,19 +443,15 @@ struct sctp_af {
int optname,
char __user *optval,
int __user *optlen);
- struct dst_entry *(*get_dst) (struct sctp_association *asoc,
- union sctp_addr *daddr,
- union sctp_addr *saddr);
+ void (*get_dst) (struct sctp_transport *t,
+ union sctp_addr *saddr,
+ struct flowi *fl,
+ struct sock *sk);
void (*get_saddr) (struct sctp_sock *sk,
- struct sctp_association *asoc,
- struct dst_entry *dst,
- union sctp_addr *daddr,
- union sctp_addr *saddr);
+ struct sctp_transport *t,
+ struct flowi *fl);
void (*copy_addrlist) (struct list_head *,
struct net_device *);
- void (*dst_saddr) (union sctp_addr *saddr,
- struct dst_entry *dst,
- __be16 port);
int (*cmp_addr) (const union sctp_addr *addr1,
const union sctp_addr *addr2);
void (*addr_copy) (union sctp_addr *dst,
@@ -753,6 +628,7 @@ struct sctp_chunk {
#define SCTP_NEED_FRTX 0x1
#define SCTP_DONT_FRTX 0x2
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
+ resent:1, /* Has this chunk ever been resent. */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
singleton:1, /* Only chunk in the packet? */
@@ -773,7 +649,6 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
-void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
struct sock *);
@@ -796,6 +671,8 @@ struct sctp_sockaddr_entry {
__u8 valid;
};
+#define SCTP_ADDRESS_TICK_DELAY 500
+
typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);
/* This structure holds lists of chunks as we are assembling for
@@ -828,8 +705,7 @@ struct sctp_packet {
has_sack:1, /* This packet contains a SACK chunk. */
has_auth:1, /* This packet contains an AUTH chunk */
has_data:1, /* This packet contains at least 1 DATA chunk */
- ipfragok:1, /* So let ip fragment this packet */
- malloced:1; /* Is it malloced? */
+ ipfragok:1; /* So let ip fragment this packet */
};
struct sctp_packet *sctp_packet_init(struct sctp_packet *,
@@ -893,11 +769,13 @@ struct sctp_transport {
hb_sent:1,
/* Is the Path MTU update pending on this tranport */
- pmtu_pending:1,
+ pmtu_pending:1;
- /* Is this structure kfree()able? */
- malloced:1;
+ /* Has this transport moved the ctsn since we last sacked */
+ __u32 sack_generation;
+ u32 dst_cookie;
+ struct flowi fl;
/* This is the peer's IP address and port. */
union sctp_addr ipaddr;
@@ -960,10 +838,10 @@ struct sctp_transport {
unsigned long sackdelay;
__u32 sackfreq;
- /* When was the last time (in jiffies) that we heard from this
- * transport? We use this to pick new active and retran paths.
+ /* When was the last time that we heard from this transport? We use
+ * this to pick new active and retran paths.
*/
- unsigned long last_time_heard;
+ ktime_t last_time_heard;
/* Last time(in jiffies) when cwnd is reduced due to the congestion
* indication based on ECNE chunk.
@@ -972,10 +850,15 @@ struct sctp_transport {
/* This is the max_retrans value for the transport and will
* be initialized from the assocs value. This can be changed
- * using SCTP_SET_PEER_ADDR_PARAMS socket option.
+ * using the SCTP_SET_PEER_ADDR_PARAMS socket option.
*/
__u16 pathmaxrxt;
+ /* This is the partially failed retrans value for the transport
+ * and will be initialized from the assoc's value. This can be changed
+ * using the SCTP_PEER_ADDR_THLDS socket option.
+ */
+ int pf_retrans;
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1053,15 +936,17 @@ struct sctp_transport {
/* 64-bit random number sent with heartbeat. */
__u64 hb_nonce;
+
+ struct rcu_head rcu;
};
-struct sctp_transport *sctp_transport_new(const union sctp_addr *,
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
gfp_t);
void sctp_transport_set_owner(struct sctp_transport *,
struct sctp_association *);
void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
struct sctp_sock *);
-void sctp_transport_pmtu(struct sctp_transport *);
+void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_timers(struct sctp_transport *);
void sctp_transport_hold(struct sctp_transport *);
@@ -1073,7 +958,8 @@ void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
void sctp_transport_reset(struct sctp_transport *);
-void sctp_transport_update_pmtu(struct sctp_transport *, u32);
+void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
/* This is the structure we use to queue packets as they come into
@@ -1093,8 +979,6 @@ struct sctp_inq {
* messages.
*/
struct work_struct immediate;
-
- int malloced; /* Is this structure kfree()able? */
};
void sctp_inq_init(struct sctp_inq *);
@@ -1129,10 +1013,10 @@ struct sctp_outq {
/* Data pending that has never been transmitted. */
struct list_head out_chunk_list;
- unsigned out_qlen; /* Total length of queued data chunks. */
+ unsigned int out_qlen; /* Total length of queued data chunks. */
/* Error of send failed, may used in SCTP_SEND_FAILED event. */
- unsigned error;
+ unsigned int error;
/* These are control chunks we want to send. */
struct list_head control_chunk_list;
@@ -1160,19 +1044,13 @@ struct sctp_outq {
/* Corked? */
char cork;
-
- /* Is this structure empty? */
- char empty;
-
- /* Are we kfree()able? */
- char malloced;
};
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk);
-int sctp_outq_sack(struct sctp_outq *, struct sctp_sackhdr *);
+int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1203,13 +1081,11 @@ struct sctp_bind_addr {
* peer(s) in INIT and INIT ACK chunks.
*/
struct list_head address_list;
-
- int malloced; /* Are we kfree()able? */
};
void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
void sctp_bind_addr_free(struct sctp_bind_addr *);
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
sctp_scope_t scope, gfp_t gfp,
int flags);
@@ -1236,9 +1112,10 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
__u16 port, gfp_t gfp);
sctp_scope_t sctp_scope(const union sctp_addr *);
-int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
int sctp_addr_is_valid(const union sctp_addr *addr);
+int sctp_is_ep_boundall(struct sock *sk);
/* What type of endpoint? */
@@ -1274,11 +1151,9 @@ struct sctp_ep_common {
/* Some fields to help us manage this object.
* refcnt - Reference count access to this object.
* dead - Do not attempt to use this object.
- * malloced - Do we need to kfree this object?
*/
atomic_t refcnt;
- char dead;
- char malloced;
+ bool dead;
/* What socket does this endpoint belong to? */
struct sock *sk;
@@ -1336,10 +1211,7 @@ struct sctp_endpoint {
* Discussion in [RFC1750] can be helpful in
* selection of the key.
*/
- __u8 secret_key[SCTP_HOW_MANY_SECRETS][SCTP_SECRET_SIZE];
- int current_key;
- int last_key;
- int key_changed_at;
+ __u8 secret_key[SCTP_SECRET_SIZE];
/* digest: This is a digest of the sctp cookie. This field is
* only used on the receive path when we try to validate
@@ -1369,6 +1241,7 @@ struct sctp_endpoint {
/* SCTP-AUTH: endpoint shared keys */
struct list_head endpoint_shared_keys;
__u16 active_key_id;
+ __u8 auth_enable;
};
/* Recover the outter endpoint structure. */
@@ -1393,14 +1266,15 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
const union sctp_addr *);
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
- const union sctp_addr *);
-int sctp_has_association(const union sctp_addr *laddr,
+ struct net *, const union sctp_addr *);
+int sctp_has_association(struct net *net, const union sctp_addr *laddr,
const union sctp_addr *paddr);
-int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
- sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
- struct sctp_chunk **err_chunk);
-int sctp_process_init(struct sctp_association *, sctp_cid_t cid,
+int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ sctp_cid_t, sctp_init_chunk_t *peer_init,
+ struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
+int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
const union sctp_addr *peer,
sctp_init_chunk_t *init, gfp_t gfp);
__u32 sctp_generate_tag(const struct sctp_endpoint *);
@@ -1414,6 +1288,40 @@ struct sctp_inithdr_host {
__u32 initial_tsn;
};
+/* SCTP_GET_ASSOC_STATS counters */
+struct sctp_priv_assoc_stats {
+ /* Maximum observed rto in the association during subsequent
+ * observations. Value is set to 0 if no RTO measurement took place.
+ * The transport where the max_rto was observed is returned in
+ * obs_rto_ipaddr.
+ */
+ struct sockaddr_storage obs_rto_ipaddr;
+ __u64 max_obs_rto;
+ /* Total In and Out SACKs received and sent */
+ __u64 isacks;
+ __u64 osacks;
+ /* Total In and Out packets received and sent */
+ __u64 opackets;
+ __u64 ipackets;
+ /* Total retransmitted chunks */
+ __u64 rtxchunks;
+ /* TSN received > next expected */
+ __u64 outofseqtsns;
+ /* Duplicate Chunks received */
+ __u64 idupchunks;
+ /* Gap Ack Blocks received */
+ __u64 gapcnt;
+ /* Unordered data chunks sent and received */
+ __u64 ouodchunks;
+ __u64 iuodchunks;
+ /* Ordered data chunks sent and received */
+ __u64 oodchunks;
+ __u64 iodchunks;
+ /* Control chunks sent and received */
+ __u64 octrlchunks;
+ __u64 ictrlchunks;
+};
+
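These counters feed the SCTP_GET_ASSOC_STATS socket option; a hedged userspace sketch (struct sctp_assoc_stats and its sas_* field names are assumed from the matching sockets-API extension and a sufficiently new netinet/sctp.h, not from this header):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static void print_assoc_stats(int fd, sctp_assoc_t id)
{
        struct sctp_assoc_stats stats;
        socklen_t len = sizeof(stats);

        memset(&stats, 0, sizeof(stats));
        stats.sas_assoc_id = id;        /* which association to query */

        if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS, &stats, &len) == 0)
                printf("max observed rto %llu, retransmitted chunks %llu\n",
                       (unsigned long long)stats.sas_maxrto,
                       (unsigned long long)stats.sas_rtxchunks);
}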
/* RFC2960
*
* 12. Recommended Transmission Control Block (TCB) Parameters
@@ -1450,12 +1358,6 @@ struct sctp_association {
/* This is all information about our peer. */
struct {
- /* rwnd
- *
- * Peer Rwnd : Current calculated value of the peer's rwnd.
- */
- __u32 rwnd;
-
/* transport_addr_list
*
* Peer : A list of SCTP transport addresses that the
@@ -1473,6 +1375,12 @@ struct sctp_association {
*/
struct list_head transport_addr_list;
+ /* rwnd
+ *
+ * Peer Rwnd : Current calculated value of the peer's rwnd.
+ */
+ __u32 rwnd;
+
/* transport_count
*
* Peer : A count of the number of peer addresses
@@ -1555,6 +1463,20 @@ struct sctp_association {
*/
struct sctp_tsnmap tsn_map;
+ /* This mask is used to disable sending the ASCONF chunk
+ * with specified parameter to peer.
+ */
+ __be16 addip_disabled_mask;
+
+ /* These are capabilities which our peer advertised. */
+ __u8 ecn_capable:1, /* Can peer do ECN? */
+ ipv4_address:1, /* Peer understands IPv4 addresses? */
+ ipv6_address:1, /* Peer understands IPv6 addresses? */
+ hostname_address:1, /* Peer understands DNS addresses? */
+ asconf_capable:1, /* Does peer support ADDIP? */
+ prsctp_capable:1, /* Can peer do PR-SCTP? */
+ auth_capable:1; /* Is peer doing SCTP-AUTH? */
+
/* Ack State : This flag indicates if the next received
* : packet is to be responded to with a
* : SACK. This is initializedto 0. When a packet
@@ -1567,26 +1489,13 @@ struct sctp_association {
*/
__u8 sack_needed; /* Do we need to sack the peer? */
__u32 sack_cnt;
-
- /* These are capabilities which our peer advertised. */
- __u8 ecn_capable:1, /* Can peer do ECN? */
- ipv4_address:1, /* Peer understands IPv4 addresses? */
- ipv6_address:1, /* Peer understands IPv6 addresses? */
- hostname_address:1, /* Peer understands DNS addresses? */
- asconf_capable:1, /* Does peer support ADDIP? */
- prsctp_capable:1, /* Can peer do PR-SCTP? */
- auth_capable:1; /* Is peer doing SCTP-AUTH? */
+ __u32 sack_generation;
__u32 adaptation_ind; /* Adaptation Code point. */
- /* This mask is used to disable sending the ASCONF chunk
- * with specified parameter to peer.
- */
- __be16 addip_disabled_mask;
-
struct sctp_inithdr_host i;
- int cookie_len;
void *cookie;
+ int cookie_len;
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
* C1) ... "Peer-Serial-Number'. This value MUST be initialized to the
@@ -1618,14 +1527,14 @@ struct sctp_association {
*/
sctp_state_t state;
- /* The cookie life I award for any cookie. */
- struct timeval cookie_life;
-
/* Overall : The overall association error count.
* Error Count : [Clear this any time I get something.]
*/
int overall_error_count;
+ /* The cookie life I award for any cookie. */
+ ktime_t cookie_life;
+
/* These are the association's initial, max, and min RTO values.
* These values will be initialized by system defaults, but can
* be modified via the SCTP_RTOINFO socket option.
@@ -1643,6 +1552,12 @@ struct sctp_association {
*/
int max_retrans;
+ /* This is the partially failed retrans value for the transport
+ * and will be initialized from the assoc's value. This can be
+ * changed using the SCTP_PEER_ADDR_THLDS socket option.
+ */
+ int pf_retrans;
+
/* Maximum number of times the endpoint will retransmit INIT */
__u16 max_init_attempts;
@@ -1674,10 +1589,9 @@ struct sctp_association {
/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
__u32 param_flags;
+ __u32 sackfreq;
/* SACK delay timeout */
unsigned long sackdelay;
- __u32 sackfreq;
-
unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
@@ -1685,12 +1599,12 @@ struct sctp_association {
/* Transport to which SHUTDOWN chunk was last sent. */
struct sctp_transport *shutdown_last_sent_to;
- /* How many times have we resent a SHUTDOWN */
- int shutdown_retries;
-
/* Transport to which INIT chunk was last sent. */
struct sctp_transport *init_last_sent_to;
+ /* How many times have we resent a SHUTDOWN */
+ int shutdown_retries;
+
/* Next TSN : The next TSN number to be assigned to a new
* : DATA chunk. This is sent in the INIT or INIT
* : ACK chunk to the peer and incremented each
@@ -1808,12 +1722,6 @@ struct sctp_association {
/* How many duplicated TSNs have we seen? */
int numduptsns;
- /* Number of seconds of idle time before an association is closed.
- * In the association context, this is really used as a boolean
- * since the real timeout is stored in the timeouts array
- */
- __u32 autoclose;
-
/* These are to support
* "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
* and Enforcement of Flow and Message Limits"
@@ -1901,6 +1809,9 @@ struct sctp_association {
* after reaching 4294967295.
*/
__u32 addip_serial;
+ int src_out_of_asoc_ok;
+ union sctp_addr *asconf_addr_del_pending;
+ struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
* keys are provided out of band by the user applicaton
@@ -1922,6 +1833,8 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1; /* Is it a temporary association? */
+
+ struct sctp_priv_assoc_stats stats;
};
@@ -1971,6 +1884,7 @@ void sctp_assoc_control_transport(struct sctp_association *,
sctp_transport_cmd_t, sctp_sn_error_t);
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
+ struct net *,
const union sctp_addr *,
const union sctp_addr *);
void sctp_assoc_migrate(struct sctp_association *, struct sock *);
@@ -1979,9 +1893,9 @@ void sctp_assoc_update(struct sctp_association *old,
__u32 sctp_association_get_next_tsn(struct sctp_association *);
-void sctp_assoc_sync_pmtu(struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned);
+void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
void sctp_assoc_set_primary(struct sctp_association *,
struct sctp_transport *);
void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
@@ -1996,7 +1910,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc);
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
const struct sctp_association *asoc,
__be32 serial);
-
+void sctp_asconf_queue_teardown(struct sctp_association *asoc);
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
const union sctp_addr *ss2);
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index e7728bc14cc..31b8dbaad45 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -22,25 +22,18 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/constants.h>
@@ -117,7 +110,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+ struct sctp_transport *trans);
/* Mark this TSN and all lower as seen. */
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 7ea12e8e667..daacb32b55b 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -25,25 +25,18 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_ulpevent_h__
@@ -80,7 +73,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
void sctp_ulpevent_free(struct sctp_ulpevent *);
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
@@ -132,6 +125,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey(
const struct sctp_association *asoc, __u16 key_id,
__u32 indication, gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
+ const struct sctp_association *asoc, gfp_t gfp);
+
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 2e5ee0d8458..e0dce07b879 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -24,24 +24,17 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_ulpqueue_h__
@@ -49,7 +42,6 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
struct sctp_ulpq {
- char malloced;
char pd_mode;
struct sctp_association *asoc;
struct sk_buff_head reasm;
@@ -72,7 +64,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
/* Perform partial delivery. */
-void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
/* Abort the partial delivery. */
void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
deleted file mode 100644
index 2b2769c5ca9..00000000000
--- a/include/net/sctp/user.h
+++ /dev/null
@@ -1,711 +0,0 @@
-/* SCTP kernel implementation
- * (C) Copyright IBM Corp. 2001, 2004
- * Copyright (c) 1999-2000 Cisco, Inc.
- * Copyright (c) 1999-2001 Motorola, Inc.
- * Copyright (c) 2002 Intel Corp.
- *
- * This file is part of the SCTP kernel implementation
- *
- * This header represents the structures and constants needed to support
- * the SCTP Extension to the Sockets API.
- *
- * This SCTP implementation is free software;
- * you can redistribute it and/or modify it under the terms of
- * the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This SCTP implementation is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- * ************************
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- *
- * Please send any bug reports or fixes you make to the
- * email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
- *
- * Written or modified by:
- * La Monte H.P. Yarroll <piggy@acm.org>
- * R. Stewart <randall@sctp.chicago.il.us>
- * K. Morneau <kmorneau@cisco.com>
- * Q. Xie <qxie1@email.mot.com>
- * Karl Knutson <karl@athena.chicago.il.us>
- * Jon Grimm <jgrimm@us.ibm.com>
- * Daisy Chang <daisyc@us.ibm.com>
- * Ryan Layer <rmlayer@us.ibm.com>
- * Ardelle Fan <ardelle.fan@intel.com>
- * Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
- */
-
-#ifndef __net_sctp_user_h__
-#define __net_sctp_user_h__
-
-#include <linux/types.h>
-#include <linux/socket.h>
-
-typedef __s32 sctp_assoc_t;
-
-/* The following symbols come from the Sockets API Extensions for
- * SCTP <draft-ietf-tsvwg-sctpsocket-07.txt>.
- */
-#define SCTP_RTOINFO 0
-#define SCTP_ASSOCINFO 1
-#define SCTP_INITMSG 2
-#define SCTP_NODELAY 3 /* Get/set nodelay option. */
-#define SCTP_AUTOCLOSE 4
-#define SCTP_SET_PEER_PRIMARY_ADDR 5
-#define SCTP_PRIMARY_ADDR 6
-#define SCTP_ADAPTATION_LAYER 7
-#define SCTP_DISABLE_FRAGMENTS 8
-#define SCTP_PEER_ADDR_PARAMS 9
-#define SCTP_DEFAULT_SEND_PARAM 10
-#define SCTP_EVENTS 11
-#define SCTP_I_WANT_MAPPED_V4_ADDR 12 /* Turn on/off mapped v4 addresses */
-#define SCTP_MAXSEG 13 /* Get/set maximum fragment. */
-#define SCTP_STATUS 14
-#define SCTP_GET_PEER_ADDR_INFO 15
-#define SCTP_DELAYED_ACK_TIME 16
-#define SCTP_DELAYED_ACK SCTP_DELAYED_ACK_TIME
-#define SCTP_CONTEXT 17
-#define SCTP_FRAGMENT_INTERLEAVE 18
-#define SCTP_PARTIAL_DELIVERY_POINT 19 /* Set/Get partial delivery point */
-#define SCTP_MAX_BURST 20 /* Set/Get max burst */
-#define SCTP_AUTH_CHUNK 21 /* Set only: add a chunk type to authenticate */
-#define SCTP_HMAC_IDENT 22
-#define SCTP_AUTH_KEY 23
-#define SCTP_AUTH_ACTIVE_KEY 24
-#define SCTP_AUTH_DELETE_KEY 25
-#define SCTP_PEER_AUTH_CHUNKS 26 /* Read only */
-#define SCTP_LOCAL_AUTH_CHUNKS 27 /* Read only */
-#define SCTP_GET_ASSOC_NUMBER 28 /* Read only */
-
-/* Internal Socket Options. Some of the sctp library functions are
- * implemented using these socket options.
- */
-#define SCTP_SOCKOPT_BINDX_ADD 100 /* BINDX requests for adding addrs */
-#define SCTP_SOCKOPT_BINDX_REM 101 /* BINDX requests for removing addrs. */
-#define SCTP_SOCKOPT_PEELOFF 102 /* peel off association. */
-/* Options 104-106 are deprecated and removed. Do not use this space */
-#define SCTP_SOCKOPT_CONNECTX_OLD 107 /* CONNECTX old requests. */
-#define SCTP_GET_PEER_ADDRS 108 /* Get all peer addresss. */
-#define SCTP_GET_LOCAL_ADDRS 109 /* Get all local addresss. */
-#define SCTP_SOCKOPT_CONNECTX 110 /* CONNECTX requests. */
-#define SCTP_SOCKOPT_CONNECTX3 111 /* CONNECTX requests (updated) */
-
-/*
- * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
- *
- * This cmsghdr structure provides information for initializing new
- * SCTP associations with sendmsg(). The SCTP_INITMSG socket option
- * uses this same data structure. This structure is not used for
- * recvmsg().
- *
- * cmsg_level cmsg_type cmsg_data[]
- * ------------ ------------ ----------------------
- * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
- *
- */
-struct sctp_initmsg {
- __u16 sinit_num_ostreams;
- __u16 sinit_max_instreams;
- __u16 sinit_max_attempts;
- __u16 sinit_max_init_timeo;
-};
-
-/*
- * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
- *
- * This cmsghdr structure specifies SCTP options for sendmsg() and
- * describes SCTP header information about a received message through
- * recvmsg().
- *
- * cmsg_level cmsg_type cmsg_data[]
- * ------------ ------------ ----------------------
- * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
- *
- */
-struct sctp_sndrcvinfo {
- __u16 sinfo_stream;
- __u16 sinfo_ssn;
- __u16 sinfo_flags;
- __u32 sinfo_ppid;
- __u32 sinfo_context;
- __u32 sinfo_timetolive;
- __u32 sinfo_tsn;
- __u32 sinfo_cumtsn;
- sctp_assoc_t sinfo_assoc_id;
-};
-
-/*
- * sinfo_flags: 16 bits (unsigned integer)
- *
- * This field may contain any of the following flags and is composed of
- * a bitwise OR of these values.
- */
-
-enum sctp_sinfo_flags {
- SCTP_UNORDERED = 1, /* Send/receive message unordered. */
- SCTP_ADDR_OVER = 2, /* Override the primary destination. */
- SCTP_ABORT=4, /* Send an ABORT message to the peer. */
- SCTP_SACK_IMMEDIATELY = 8, /* SACK should be sent without delay */
- SCTP_EOF=MSG_FIN, /* Initiate graceful shutdown process. */
-};
-
-
-/* These are cmsg_types. */
-typedef enum sctp_cmsg_type {
- SCTP_INIT, /* 5.2.1 SCTP Initiation Structure */
- SCTP_SNDRCV, /* 5.2.2 SCTP Header Information Structure */
-} sctp_cmsg_t;
-
-
-/*
- * 5.3.1.1 SCTP_ASSOC_CHANGE
- *
- * Communication notifications inform the ULP that an SCTP association
- * has either begun or ended. The identifier for a new association is
- * provided by this notificaion. The notification information has the
- * following format:
- *
- */
-struct sctp_assoc_change {
- __u16 sac_type;
- __u16 sac_flags;
- __u32 sac_length;
- __u16 sac_state;
- __u16 sac_error;
- __u16 sac_outbound_streams;
- __u16 sac_inbound_streams;
- sctp_assoc_t sac_assoc_id;
- __u8 sac_info[0];
-};
-
-/*
- * sac_state: 32 bits (signed integer)
- *
- * This field holds one of a number of values that communicate the
- * event that happened to the association. They include:
- *
- * Note: The following state names deviate from the API draft as
- * the names clash too easily with other kernel symbols.
- */
-enum sctp_sac_state {
- SCTP_COMM_UP,
- SCTP_COMM_LOST,
- SCTP_RESTART,
- SCTP_SHUTDOWN_COMP,
- SCTP_CANT_STR_ASSOC,
-};
-
-/*
- * 5.3.1.2 SCTP_PEER_ADDR_CHANGE
- *
- * When a destination address on a multi-homed peer encounters a change
- * an interface details event is sent. The information has the
- * following structure:
- */
-struct sctp_paddr_change {
- __u16 spc_type;
- __u16 spc_flags;
- __u32 spc_length;
- struct sockaddr_storage spc_aaddr;
- int spc_state;
- int spc_error;
- sctp_assoc_t spc_assoc_id;
-} __attribute__((packed, aligned(4)));
-
-/*
- * spc_state: 32 bits (signed integer)
- *
- * This field holds one of a number of values that communicate the
- * event that happened to the address. They include:
- */
-enum sctp_spc_state {
- SCTP_ADDR_AVAILABLE,
- SCTP_ADDR_UNREACHABLE,
- SCTP_ADDR_REMOVED,
- SCTP_ADDR_ADDED,
- SCTP_ADDR_MADE_PRIM,
- SCTP_ADDR_CONFIRMED,
-};
-
-
-/*
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * A remote peer may send an Operational Error message to its peer.
- * This message indicates a variety of error conditions on an
- * association. The entire error TLV as it appears on the wire is
- * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP
- * specification [SCTP] and any extensions for a list of possible
- * error formats. SCTP error TLVs have the format:
- */
-struct sctp_remote_error {
- __u16 sre_type;
- __u16 sre_flags;
- __u32 sre_length;
- __u16 sre_error;
- sctp_assoc_t sre_assoc_id;
- __u8 sre_data[0];
-};
-
-
-/*
- * 5.3.1.4 SCTP_SEND_FAILED
- *
- * If SCTP cannot deliver a message it may return the message as a
- * notification.
- */
-struct sctp_send_failed {
- __u16 ssf_type;
- __u16 ssf_flags;
- __u32 ssf_length;
- __u32 ssf_error;
- struct sctp_sndrcvinfo ssf_info;
- sctp_assoc_t ssf_assoc_id;
- __u8 ssf_data[0];
-};
-
-/*
- * ssf_flags: 16 bits (unsigned integer)
- *
- * The flag value will take one of the following values
- *
- * SCTP_DATA_UNSENT - Indicates that the data was never put on
- * the wire.
- *
- * SCTP_DATA_SENT - Indicates that the data was put on the wire.
- * Note that this does not necessarily mean that the
- * data was (or was not) successfully delivered.
- */
-enum sctp_ssf_flags {
- SCTP_DATA_UNSENT,
- SCTP_DATA_SENT,
-};
-
-/*
- * 5.3.1.5 SCTP_SHUTDOWN_EVENT
- *
- * When a peer sends a SHUTDOWN, SCTP delivers this notification to
- * inform the application that it should cease sending data.
- */
-struct sctp_shutdown_event {
- __u16 sse_type;
- __u16 sse_flags;
- __u32 sse_length;
- sctp_assoc_t sse_assoc_id;
-};
-
-/*
- * 5.3.1.6 SCTP_ADAPTATION_INDICATION
- *
- * When a peer sends a Adaptation Layer Indication parameter , SCTP
- * delivers this notification to inform the application
- * that of the peers requested adaptation layer.
- */
-struct sctp_adaptation_event {
- __u16 sai_type;
- __u16 sai_flags;
- __u32 sai_length;
- __u32 sai_adaptation_ind;
- sctp_assoc_t sai_assoc_id;
-};
-
-/*
- * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
- *
- * When a receiver is engaged in a partial delivery of a
- * message this notification will be used to indicate
- * various events.
- */
-struct sctp_pdapi_event {
- __u16 pdapi_type;
- __u16 pdapi_flags;
- __u32 pdapi_length;
- __u32 pdapi_indication;
- sctp_assoc_t pdapi_assoc_id;
-};
-
-enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
-
-struct sctp_authkey_event {
- __u16 auth_type;
- __u16 auth_flags;
- __u32 auth_length;
- __u16 auth_keynumber;
- __u16 auth_altkeynumber;
- __u32 auth_indication;
- sctp_assoc_t auth_assoc_id;
-};
-
-enum { SCTP_AUTH_NEWKEY = 0, };
-
-
-/*
- * Described in Section 7.3
- * Ancillary Data and Notification Interest Options
- */
-struct sctp_event_subscribe {
- __u8 sctp_data_io_event;
- __u8 sctp_association_event;
- __u8 sctp_address_event;
- __u8 sctp_send_failure_event;
- __u8 sctp_peer_error_event;
- __u8 sctp_shutdown_event;
- __u8 sctp_partial_delivery_event;
- __u8 sctp_adaptation_layer_event;
- __u8 sctp_authentication_event;
-};
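Although this kernel-side copy of the header is removed by this commit, struct sctp_event_subscribe above is still the knob an application flips to receive the notifications defined in this file. A hedged userspace sketch, assuming the usual userspace copies of these definitions from <netinet/sctp.h> (lksctp-tools) and an already-created SCTP socket descriptor sctp_fd:

#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

int subscribe_events(int sctp_fd)
{
        struct sctp_event_subscribe events;

        memset(&events, 0, sizeof(events));
        events.sctp_association_event = 1;      /* deliver SCTP_ASSOC_CHANGE */
        events.sctp_shutdown_event    = 1;      /* deliver SCTP_SHUTDOWN_EVENT */

        /* SCTP_EVENTS (11) is the socket option defined earlier in this header. */
        return setsockopt(sctp_fd, IPPROTO_SCTP, SCTP_EVENTS,
                          &events, sizeof(events));
}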
-
-/*
- * 5.3.1 SCTP Notification Structure
- *
- * The notification structure is defined as the union of all
- * notification types.
- *
- */
-union sctp_notification {
- struct {
- __u16 sn_type; /* Notification type. */
- __u16 sn_flags;
- __u32 sn_length;
- } sn_header;
- struct sctp_assoc_change sn_assoc_change;
- struct sctp_paddr_change sn_paddr_change;
- struct sctp_remote_error sn_remote_error;
- struct sctp_send_failed sn_send_failed;
- struct sctp_shutdown_event sn_shutdown_event;
- struct sctp_adaptation_event sn_adaptation_event;
- struct sctp_pdapi_event sn_pdapi_event;
- struct sctp_authkey_event sn_authkey_event;
-};
-
-/* Section 5.3.1
- * All standard values for sn_type flags are greater than 2^15.
- * Values from 2^15 and down are reserved.
- */
-
-enum sctp_sn_type {
- SCTP_SN_TYPE_BASE = (1<<15),
- SCTP_ASSOC_CHANGE,
- SCTP_PEER_ADDR_CHANGE,
- SCTP_SEND_FAILED,
- SCTP_REMOTE_ERROR,
- SCTP_SHUTDOWN_EVENT,
- SCTP_PARTIAL_DELIVERY_EVENT,
- SCTP_ADAPTATION_INDICATION,
- SCTP_AUTHENTICATION_INDICATION,
-};
-
-/* Notification error codes used to fill up the error fields in some
- * notifications.
- * SCTP_PEER_ADDRESS_CHAGE : spc_error
- * SCTP_ASSOC_CHANGE : sac_error
- * These names should be potentially included in the draft 04 of the SCTP
- * sockets API specification.
- */
-typedef enum sctp_sn_error {
- SCTP_FAILED_THRESHOLD,
- SCTP_RECEIVED_SACK,
- SCTP_HEARTBEAT_SUCCESS,
- SCTP_RESPONSE_TO_USER_REQ,
- SCTP_INTERNAL_ERROR,
- SCTP_SHUTDOWN_GUARD_EXPIRES,
- SCTP_PEER_FAULTY,
-} sctp_sn_error_t;
-
-/*
- * 7.1.1 Retransmission Timeout Parameters (SCTP_RTOINFO)
- *
- * The protocol parameters used to initialize and bound retransmission
- * timeout (RTO) are tunable. See [SCTP] for more information on how
- * these parameters are used in RTO calculation.
- */
-struct sctp_rtoinfo {
- sctp_assoc_t srto_assoc_id;
- __u32 srto_initial;
- __u32 srto_max;
- __u32 srto_min;
-};
-
-/*
- * 7.1.2 Association Parameters (SCTP_ASSOCINFO)
- *
- * This option is used to both examine and set various association and
- * endpoint parameters.
- */
-struct sctp_assocparams {
- sctp_assoc_t sasoc_assoc_id;
- __u16 sasoc_asocmaxrxt;
- __u16 sasoc_number_peer_destinations;
- __u32 sasoc_peer_rwnd;
- __u32 sasoc_local_rwnd;
- __u32 sasoc_cookie_life;
-};
-
-/*
- * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
- *
- * Requests that the peer mark the enclosed address as the association
- * primary. The enclosed address must be one of the association's
- * locally bound addresses. The following structure is used to make a
- * set primary request:
- */
-struct sctp_setpeerprim {
- sctp_assoc_t sspp_assoc_id;
- struct sockaddr_storage sspp_addr;
-} __attribute__((packed, aligned(4)));
-
-/*
- * 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
- *
- * Requests that the local SCTP stack use the enclosed peer address as
- * the association primary. The enclosed address must be one of the
- * association peer's addresses. The following structure is used to
- * make a set peer primary request:
- */
-struct sctp_prim {
- sctp_assoc_t ssp_assoc_id;
- struct sockaddr_storage ssp_addr;
-} __attribute__((packed, aligned(4)));
-
-/*
- * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
- *
- * Requests that the local endpoint set the specified Adaptation Layer
- * Indication parameter for all future INIT and INIT-ACK exchanges.
- */
-struct sctp_setadaptation {
- __u32 ssb_adaptation_ind;
-};
-
-/*
- * 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
- *
- * Applications can enable or disable heartbeats for any peer address
- * of an association, modify an address's heartbeat interval, force a
- * heartbeat to be sent immediately, and adjust the address's maximum
- * number of retransmissions sent before an address is considered
- * unreachable. The following structure is used to access and modify an
- * address's parameters:
- */
-enum sctp_spp_flags {
- SPP_HB_ENABLE = 1<<0, /*Enable heartbeats*/
- SPP_HB_DISABLE = 1<<1, /*Disable heartbeats*/
- SPP_HB = SPP_HB_ENABLE | SPP_HB_DISABLE,
- SPP_HB_DEMAND = 1<<2, /*Send heartbeat immediately*/
- SPP_PMTUD_ENABLE = 1<<3, /*Enable PMTU discovery*/
- SPP_PMTUD_DISABLE = 1<<4, /*Disable PMTU discovery*/
- SPP_PMTUD = SPP_PMTUD_ENABLE | SPP_PMTUD_DISABLE,
- SPP_SACKDELAY_ENABLE = 1<<5, /*Enable SACK*/
- SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/
- SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
- SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */
-};
-
-struct sctp_paddrparams {
- sctp_assoc_t spp_assoc_id;
- struct sockaddr_storage spp_address;
- __u32 spp_hbinterval;
- __u16 spp_pathmaxrxt;
- __u32 spp_pathmtu;
- __u32 spp_sackdelay;
- __u32 spp_flags;
-} __attribute__((packed, aligned(4)));
-
-/*
- * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
- *
- * This set option adds a chunk type that the user is requesting to be
- * received only in an authenticated way. Changes to the list of chunks
- * will only effect future associations on the socket.
- */
-struct sctp_authchunk {
- __u8 sauth_chunk;
-};
-
-/*
- * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
- *
- * This option gets or sets the list of HMAC algorithms that the local
- * endpoint requires the peer to use.
-*/
-struct sctp_hmacalgo {
- __u32 shmac_num_idents;
- __u16 shmac_idents[];
-};
-
-/*
- * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
- *
- * This option will set a shared secret key which is used to build an
- * association shared key.
- */
-struct sctp_authkey {
- sctp_assoc_t sca_assoc_id;
- __u16 sca_keynumber;
- __u16 sca_keylength;
- __u8 sca_key[];
-};
-
-/*
- * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
- *
- * This option will get or set the active shared key to be used to build
- * the association shared key.
- */
-
-struct sctp_authkeyid {
- sctp_assoc_t scact_assoc_id;
- __u16 scact_keynumber;
-};
-
-
-/*
- * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
- *
- * This option will effect the way delayed acks are performed. This
- * option allows you to get or set the delayed ack time, in
- * milliseconds. It also allows changing the delayed ack frequency.
- * Changing the frequency to 1 disables the delayed sack algorithm. If
- * the assoc_id is 0, then this sets or gets the endpoints default
- * values. If the assoc_id field is non-zero, then the set or get
- * effects the specified association for the one to many model (the
- * assoc_id field is ignored by the one to one model). Note that if
- * sack_delay or sack_freq are 0 when setting this option, then the
- * current values will remain unchanged.
- */
-struct sctp_sack_info {
- sctp_assoc_t sack_assoc_id;
- uint32_t sack_delay;
- uint32_t sack_freq;
-};
-
-struct sctp_assoc_value {
- sctp_assoc_t assoc_id;
- uint32_t assoc_value;
-};
-
-/*
- * 7.2.2 Peer Address Information
- *
- * Applications can retrieve information about a specific peer address
- * of an association, including its reachability state, congestion
- * window, and retransmission timer values. This information is
- * read-only. The following structure is used to access this
- * information:
- */
-struct sctp_paddrinfo {
- sctp_assoc_t spinfo_assoc_id;
- struct sockaddr_storage spinfo_address;
- __s32 spinfo_state;
- __u32 spinfo_cwnd;
- __u32 spinfo_srtt;
- __u32 spinfo_rto;
- __u32 spinfo_mtu;
-} __attribute__((packed, aligned(4)));
-
-/* Peer addresses's state. */
-/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x]
- * calls.
- * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters.
- * Not yet confirmed by a heartbeat and not available for data
- * transfers.
- * ACTIVE : Peer address confirmed, active and available for data transfers.
- * INACTIVE: Peer address inactive and not available for data transfers.
- */
-enum sctp_spinfo_state {
- SCTP_INACTIVE,
- SCTP_ACTIVE,
- SCTP_UNCONFIRMED,
- SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
-};
-
-/*
- * 7.2.1 Association Status (SCTP_STATUS)
- *
- * Applications can retrieve current status information about an
- * association, including association state, peer receiver window size,
- * number of unacked data chunks, and number of data chunks pending
- * receipt. This information is read-only. The following structure is
- * used to access this information:
- */
-struct sctp_status {
- sctp_assoc_t sstat_assoc_id;
- __s32 sstat_state;
- __u32 sstat_rwnd;
- __u16 sstat_unackdata;
- __u16 sstat_penddata;
- __u16 sstat_instrms;
- __u16 sstat_outstrms;
- __u32 sstat_fragmentation_point;
- struct sctp_paddrinfo sstat_primary;
-};
-
-/*
- * 7.2.3. Get the list of chunks the peer requires to be authenticated
- * (SCTP_PEER_AUTH_CHUNKS)
- *
- * This option gets a list of chunks for a specified association that
- * the peer requires to be received authenticated only.
- */
-struct sctp_authchunks {
- sctp_assoc_t gauth_assoc_id;
- __u32 gauth_number_of_chunks;
- uint8_t gauth_chunks[];
-};
-
-/*
- * 8.3, 8.5 get all peer/local addresses in an association.
- * This parameter struct is used by SCTP_GET_PEER_ADDRS and
- * SCTP_GET_LOCAL_ADDRS socket options used internally to implement
- * sctp_getpaddrs() and sctp_getladdrs() API.
- */
-struct sctp_getaddrs_old {
- sctp_assoc_t assoc_id;
- int addr_num;
- struct sockaddr __user *addrs;
-};
-struct sctp_getaddrs {
- sctp_assoc_t assoc_id; /*input*/
- __u32 addr_num; /*output*/
- __u8 addrs[0]; /*output, variable size*/
-};
-
-/* These are bit fields for msghdr->msg_flags. See section 5.1. */
-/* On user space Linux, these live in <bits/socket.h> as an enum. */
-enum sctp_msg_flags {
- MSG_NOTIFICATION = 0x8000,
-#define MSG_NOTIFICATION MSG_NOTIFICATION
-};
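MSG_NOTIFICATION in msg_flags is how a receiver tells the event structures above apart from ordinary payload. A hedged userspace sketch of dispatching on union sctp_notification; the names come from this header, the buffer handling is deliberately simplified:

#include <stdio.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

static void handle_message(char *buf, ssize_t len, int msg_flags)
{
        if (msg_flags & MSG_NOTIFICATION) {
                union sctp_notification *sn = (union sctp_notification *)buf;

                switch (sn->sn_header.sn_type) {
                case SCTP_ASSOC_CHANGE:
                        printf("assoc state %u\n", sn->sn_assoc_change.sac_state);
                        break;
                case SCTP_SHUTDOWN_EVENT:
                        printf("peer shut down\n");
                        break;
                default:
                        break;
                }
        } else {
                printf("data: %zd bytes\n", len);
        }
}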
-
-/*
- * 8.1 sctp_bindx()
- *
- * The flags parameter is formed from the bitwise OR of zero or more of the
- * following currently defined flags:
- */
-#define SCTP_BINDX_ADD_ADDR 0x01
-#define SCTP_BINDX_REM_ADDR 0x02
-
-/* This is the structure that is passed as an argument(optval) to
- * getsockopt(SCTP_SOCKOPT_PEELOFF).
- */
-typedef struct {
- sctp_assoc_t associd;
- int sd;
-} sctp_peeloff_arg_t;
-
-#endif /* __net_sctp_user_h__ */
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
new file mode 100644
index 00000000000..3f36d45b714
--- /dev/null
+++ b/include/net/secure_seq.h
@@ -0,0 +1,18 @@
+#ifndef _NET_SECURE_SEQ
+#define _NET_SECURE_SEQ
+
+#include <linux/types.h>
+
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport);
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport);
+
+#endif /* _NET_SECURE_SEQ */
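The new header only declares prototypes; the implementations live elsewhere, presumably net/core/secure_seq.c in this tree. The functions return hard-to-predict initial sequence numbers and ephemeral-port hashes keyed on the connection 4-tuple. A hedged kernel-context sketch of how a hypothetical IPv4 connect path might call them (pick_isn/pick_local_port_hash are illustrative names):

#include <net/secure_seq.h>

/* saddr/daddr/sport/dport are the 4-tuple in network byte order. */
static u32 pick_isn(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport)
{
        return secure_tcp_sequence_number(saddr, daddr, sport, dport);
}

static u32 pick_local_port_hash(__be32 saddr, __be32 daddr, __be16 dport)
{
        return secure_ipv4_port_ephemeral(saddr, daddr, dport);
}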
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 762e2abce88..f1f27fdbb0d 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -67,18 +67,28 @@ struct icmp_mib {
#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
struct icmpmsg_mib {
- unsigned long mibs[ICMPMSG_MIB_MAX];
+ atomic_long_t mibs[ICMPMSG_MIB_MAX];
};
/* ICMP6 (IPv6-ICMP) */
#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
+/* per network ns counters */
struct icmpv6_mib {
unsigned long mibs[ICMP6_MIB_MAX];
};
+/* per device counters, (shared on all cpus) */
+struct icmpv6_mib_device {
+ atomic_long_t mibs[ICMP6_MIB_MAX];
+};
#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
+/* per network ns counters */
struct icmpv6msg_mib {
- unsigned long mibs[ICMP6MSG_MIB_MAX];
+ atomic_long_t mibs[ICMP6MSG_MIB_MAX];
+};
+/* per device counters, (shared on all cpus) */
+struct icmpv6msg_mib_device {
+ atomic_long_t mibs[ICMP6MSG_MIB_MAX];
};
@@ -106,53 +116,51 @@ struct linux_xfrm_mib {
unsigned long mibs[LINUX_MIB_XFRMMAX];
};
-/*
- * FIXME: On x86 and some other CPUs the split into user and softirq parts
- * is not needed because addl $1,memory is atomic against interrupts (but
- * atomic_inc would be overkill because of the lock cycles). Wants new
- * nonlocked_atomic_inc() primitives -AK
- */
#define DEFINE_SNMP_STAT(type, name) \
- __typeof__(type) __percpu *name[2]
+ __typeof__(type) __percpu *name
+#define DEFINE_SNMP_STAT_ATOMIC(type, name) \
+ __typeof__(type) *name
#define DECLARE_SNMP_STAT(type, name) \
- extern __typeof__(type) __percpu *name[2]
-
-#define SNMP_STAT_BHPTR(name) (name[0])
-#define SNMP_STAT_USRPTR(name) (name[1])
+ extern __typeof__(type) __percpu *name
#define SNMP_INC_STATS_BH(mib, field) \
- __this_cpu_inc(mib[0]->mibs[field])
+ __this_cpu_inc(mib->mibs[field])
+
#define SNMP_INC_STATS_USER(mib, field) \
- this_cpu_inc(mib[1]->mibs[field])
+ this_cpu_inc(mib->mibs[field])
+
+#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
+ atomic_long_inc(&mib->mibs[field])
+
#define SNMP_INC_STATS(mib, field) \
- this_cpu_inc(mib[!in_softirq()]->mibs[field])
+ this_cpu_inc(mib->mibs[field])
+
#define SNMP_DEC_STATS(mib, field) \
- this_cpu_dec(mib[!in_softirq()]->mibs[field])
+ this_cpu_dec(mib->mibs[field])
+
#define SNMP_ADD_STATS_BH(mib, field, addend) \
- __this_cpu_add(mib[0]->mibs[field], addend)
+ __this_cpu_add(mib->mibs[field], addend)
+
#define SNMP_ADD_STATS_USER(mib, field, addend) \
- this_cpu_add(mib[1]->mibs[field], addend)
+ this_cpu_add(mib->mibs[field], addend)
+
#define SNMP_ADD_STATS(mib, field, addend) \
- this_cpu_add(mib[!in_softirq()]->mibs[field], addend)
+ this_cpu_add(mib->mibs[field], addend)
/*
- * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
+ * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
* to make @ptr a non-percpu pointer.
*/
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(*mib[0]) *ptr; \
- preempt_disable(); \
- ptr = this_cpu_ptr((mib)[!in_softirq()]); \
- ptr->mibs[basefield##PKTS]++; \
- ptr->mibs[basefield##OCTETS] += addend;\
- preempt_enable(); \
+ __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ this_cpu_inc(ptr[basefield##PKTS]); \
+ this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib[0]) *ptr = \
- __this_cpu_ptr((mib)[!in_softirq()]); \
- ptr->mibs[basefield##PKTS]++; \
- ptr->mibs[basefield##OCTETS] += addend;\
+ __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __this_cpu_inc(ptr[basefield##PKTS]); \
+ __this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
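With the separate user/softirq per-cpu arrays gone, a MIB is now a single per-cpu pointer, and the *_BH and *_USER variants differ only in whether they use the __this_cpu or this_cpu forms. A minimal sketch assuming a hypothetical module-local MIB; my_mib, mymib and MYMIB_INERRORS are illustrative names, not kernel symbols:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <net/snmp.h>

enum { MYMIB_INERRORS, __MYMIB_MAX };

struct my_mib {
        unsigned long mibs[__MYMIB_MAX];
};

DEFINE_SNMP_STAT(struct my_mib, mymib);         /* struct my_mib __percpu *mymib */

static int my_mib_init(void)
{
        mymib = alloc_percpu(struct my_mib);
        return mymib ? 0 : -ENOMEM;
}

static void my_rx_error(void)
{
        SNMP_INC_STATS_BH(mymib, MYMIB_INERRORS);       /* softirq context */
}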
@@ -160,53 +168,39 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS64_BH(mib, field, addend) \
do { \
- __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]); \
+ __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[field] += addend; \
u64_stats_update_end(&ptr->syncp); \
} while (0)
+
#define SNMP_ADD_STATS64_USER(mib, field, addend) \
do { \
- __typeof__(*mib[0]) *ptr; \
- preempt_disable(); \
- ptr = __this_cpu_ptr((mib)[1]); \
- u64_stats_update_begin(&ptr->syncp); \
- ptr->mibs[field] += addend; \
- u64_stats_update_end(&ptr->syncp); \
- preempt_enable(); \
+ local_bh_disable(); \
+ SNMP_ADD_STATS64_BH(mib, field, addend); \
+ local_bh_enable(); \
} while (0)
+
#define SNMP_ADD_STATS64(mib, field, addend) \
- do { \
- __typeof__(*mib[0]) *ptr; \
- preempt_disable(); \
- ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
- u64_stats_update_begin(&ptr->syncp); \
- ptr->mibs[field] += addend; \
- u64_stats_update_end(&ptr->syncp); \
- preempt_enable(); \
- } while (0)
+ SNMP_ADD_STATS64_USER(mib, field, addend)
+
#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib[0]) *ptr; \
- preempt_disable(); \
- ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ __typeof__(*mib) *ptr; \
+ ptr = __this_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend; \
u64_stats_update_end(&ptr->syncp); \
- preempt_enable(); \
} while (0)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
- __typeof__(*mib[0]) *ptr; \
- ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
- u64_stats_update_begin(&ptr->syncp); \
- ptr->mibs[basefield##PKTS]++; \
- ptr->mibs[basefield##OCTETS] += addend; \
- u64_stats_update_end(&ptr->syncp); \
+ local_bh_disable(); \
+ SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \
+ local_bh_enable(); \
} while (0)
#else
#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field)
diff --git a/include/net/sock.h b/include/net/sock.h
index 717cfbf649d..15635074570 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -40,18 +40,25 @@
#ifndef _SOCK_H
#define _SOCK_H
+#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
-#include <linux/module.h>
+#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
+#include <linux/static_key.h>
+#include <linux/aio.h>
+#include <linux/sched.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
@@ -61,6 +68,22 @@
#include <net/dst.h>
#include <net/checksum.h>
+struct cgroup;
+struct cgroup_subsys;
+#ifdef CONFIG_NET
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
+#else
+static inline
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
+{
+ return 0;
+}
+static inline
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
+{
+}
+#endif
/*
* This structure really needs to be cleaned up.
* Most of it is for TCP, and not used by any of
@@ -74,8 +97,8 @@
printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
-static inline void __attribute__ ((format (printf, 2, 3)))
-SOCK_DEBUG(struct sock *sk, const char *msg, ...)
+static inline __printf(2, 3)
+void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
@@ -103,44 +126,62 @@ struct sock;
struct proto;
struct net;
+typedef __u32 __bitwise __portpair;
+typedef __u64 __bitwise __addrpair;
+
/**
* struct sock_common - minimal network layer representation of sockets
- * @skc_node: main hash linkage for various protocol lookup tables
- * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
- * @skc_refcnt: reference count
- * @skc_tx_queue_mapping: tx queue number for this connection
+ * @skc_daddr: Foreign IPv4 addr
+ * @skc_rcv_saddr: Bound local IPv4 addr
* @skc_hash: hash value used with various protocol lookup tables
* @skc_u16hashes: two u16 hash values used by UDP lookup tables
+ * @skc_dport: placeholder for inet_dport/tw_dport
+ * @skc_num: placeholder for inet_num/tw_num
* @skc_family: network address family
* @skc_state: Connection state
* @skc_reuse: %SO_REUSEADDR setting
+ * @skc_reuseport: %SO_REUSEPORT setting
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
* @skc_prot: protocol handlers inside a network family
* @skc_net: reference to the network namespace of this socket
+ * @skc_node: main hash linkage for various protocol lookup tables
+ * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
+ * @skc_tx_queue_mapping: tx queue number for this connection
+ * @skc_refcnt: reference count
*
* This is the minimal network layer representation of sockets, the header
* for struct sock and struct inet_timewait_sock.
*/
struct sock_common {
- /*
- * first fields are not copied in sock_copy()
+ /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
+ * address on 64bit arches : cf INET_MATCH()
*/
union {
- struct hlist_node skc_node;
- struct hlist_nulls_node skc_nulls_node;
+ __addrpair skc_addrpair;
+ struct {
+ __be32 skc_daddr;
+ __be32 skc_rcv_saddr;
+ };
};
- atomic_t skc_refcnt;
- int skc_tx_queue_mapping;
-
union {
unsigned int skc_hash;
__u16 skc_u16hashes[2];
};
+ /* skc_dport && skc_num must be grouped as well */
+ union {
+ __portpair skc_portpair;
+ struct {
+ __be16 skc_dport;
+ __u16 skc_num;
+ };
+ };
+
unsigned short skc_family;
volatile unsigned char skc_state;
- unsigned char skc_reuse;
+ unsigned char skc_reuse:4;
+ unsigned char skc_reuseport:4;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
@@ -150,8 +191,31 @@ struct sock_common {
#ifdef CONFIG_NET_NS
struct net *skc_net;
#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr skc_v6_daddr;
+ struct in6_addr skc_v6_rcv_saddr;
+#endif
+
+ /*
+ * fields between dontcopy_begin/dontcopy_end
+ * are not copied in sock_copy()
+ */
+ /* private: */
+ int skc_dontcopy_begin[0];
+ /* public: */
+ union {
+ struct hlist_node skc_node;
+ struct hlist_nulls_node skc_nulls_node;
+ };
+ int skc_tx_queue_mapping;
+ atomic_t skc_refcnt;
+ /* private: */
+ int skc_dontcopy_end[0];
+ /* public: */
};
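The reshuffle groups skc_daddr/skc_rcv_saddr (and skc_dport/skc_num) so that established-socket lookup can check both halves with one load; the skc_addrpair/skc_portpair union members expose the combined words, and the comment above points at INET_MATCH() as the consumer. A hedged sketch of the idea only; the helper below is hypothetical, not the kernel's lookup macro:

#include <linux/string.h>
#include <net/sock.h>

/* Build a 64-bit cookie with the same layout as the skc_addrpair union,
 * then both IPv4 addresses can be matched with a single compare. */
static inline bool addr_pair_matches(const struct sock *sk,
                                     __be32 daddr, __be32 rcv_saddr)
{
        struct {
                __be32 daddr;
                __be32 rcv_saddr;
        } pair = { daddr, rcv_saddr };
        __addrpair cookie;

        memcpy(&cookie, &pair, sizeof(cookie));
        return sk->sk_addrpair == cookie;
}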
+struct cg_proto;
/**
* struct sock - network layer representation of sockets
* @__sk_common: shared layout with inet_timewait_sock
@@ -160,10 +224,10 @@ struct sock_common {
* @sk_lock: synchronizer
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
+ * @sk_rx_dst: receive input route used by early demux
* @sk_dst_cache: destination cache
* @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
- * @sk_rmem_alloc: receive queue bytes committed
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_write_queue: Packet sending queue
@@ -171,15 +235,21 @@ struct sock_common {
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_napi_id: id of the last napi context to receive data for sk
+ * @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
+ * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+ * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
- * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
+ * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
+ * @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
+ * @sk_gso_max_segs: Maximum number of GSO segments
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -193,6 +263,7 @@ struct sock_common {
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_priority: %SO_PRIORITY setting
+ * @sk_cgrp_prioidx: socket group's priority map index
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
* @sk_peer_pid: &struct pid for this socket's peer
@@ -207,12 +278,13 @@ struct sock_common {
* @sk_stamp: time stamp of last packet received
* @sk_socket: Identd and reporting IO signals
* @sk_user_data: RPC layer private data
- * @sk_sndmsg_page: cached page for sendmsg
- * @sk_sndmsg_off: cached offset for sendmsg
+ * @sk_frag: cached page frag
+ * @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_classid: this socket's cgroup classid
+ * @sk_cgrp: this socket's cgroup-specific proto data
* @sk_write_pending: a write to stream socket waits to start
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
@@ -232,15 +304,26 @@ struct sock {
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
-#define sk_copy_start __sk_common.skc_hash
+#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
+#define sk_dontcopy_end __sk_common.skc_dontcopy_end
#define sk_hash __sk_common.skc_hash
+#define sk_portpair __sk_common.skc_portpair
+#define sk_num __sk_common.skc_num
+#define sk_dport __sk_common.skc_dport
+#define sk_addrpair __sk_common.skc_addrpair
+#define sk_daddr __sk_common.skc_daddr
+#define sk_rcv_saddr __sk_common.skc_rcv_saddr
#define sk_family __sk_common.skc_family
#define sk_state __sk_common.skc_state
#define sk_reuse __sk_common.skc_reuse
+#define sk_reuseport __sk_common.skc_reuseport
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
+#define sk_v6_daddr __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
/*
@@ -262,11 +345,15 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int sk_napi_id;
+ unsigned int sk_ll_usec;
+#endif
atomic_t sk_drops;
int sk_rcvbuf;
struct sk_filter __rcu *sk_filter;
- struct socket_wq *sk_wq;
+ struct socket_wq __rcu *sk_wq;
#ifdef CONFIG_NET_DMA
struct sk_buff_head sk_async_wait_queue;
@@ -276,7 +363,8 @@ struct sock {
struct xfrm_policy *sk_policy[2];
#endif
unsigned long sk_flags;
- struct dst_entry *sk_dst_cache;
+ struct dst_entry *sk_rx_dst;
+ struct dst_entry __rcu *sk_dst_cache;
spinlock_t sk_dst_lock;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
@@ -284,17 +372,21 @@ struct sock {
struct sk_buff_head sk_write_queue;
kmemcheck_bitfield_begin(flags);
unsigned int sk_shutdown : 2,
- sk_no_check : 2,
+ sk_no_check_tx : 1,
+ sk_no_check_rx : 1,
sk_userlocks : 4,
sk_protocol : 8,
sk_type : 16;
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
- int sk_route_caps;
- int sk_route_nocaps;
+ u32 sk_pacing_rate; /* bytes per second */
+ u32 sk_max_pacing_rate;
+ netdev_features_t sk_route_caps;
+ netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
+ u16 sk_gso_max_segs;
int sk_rcvlowat;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
@@ -305,6 +397,9 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+ __u32 sk_cgrp_prioidx;
+#endif
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
@@ -314,24 +409,65 @@ struct sock {
ktime_t sk_stamp;
struct socket *sk_socket;
void *sk_user_data;
- struct page *sk_sndmsg_page;
+ struct page_frag sk_frag;
struct sk_buff *sk_send_head;
- __u32 sk_sndmsg_off;
+ __s32 sk_peek_off;
int sk_write_pending;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
__u32 sk_mark;
u32 sk_classid;
+ struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
- void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
- int (*sk_backlog_rcv)(struct sock *sk,
- struct sk_buff *skb);
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
void (*sk_destruct)(struct sock *sk);
};
+#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+
+#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
+#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
+
+/*
+ * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not its
+ * local port may be reused by someone else. SK_FORCE_REUSE on a socket
+ * means that the socket will reuse everybody else's port without looking
+ * at the other's sk_reuse value.
+ */
+
+#define SK_NO_REUSE 0
+#define SK_CAN_REUSE 1
+#define SK_FORCE_REUSE 2
+
+static inline int sk_peek_offset(struct sock *sk, int flags)
+{
+ if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
+ return sk->sk_peek_off;
+ else
+ return 0;
+}
+
+static inline void sk_peek_offset_bwd(struct sock *sk, int val)
+{
+ if (sk->sk_peek_off >= 0) {
+ if (sk->sk_peek_off >= val)
+ sk->sk_peek_off -= val;
+ else
+ sk->sk_peek_off = 0;
+ }
+}
+
+static inline void sk_peek_offset_fwd(struct sock *sk, int val)
+{
+ if (sk->sk_peek_off >= 0)
+ sk->sk_peek_off += val;
+}
+
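sk_peek_off lets MSG_PEEK readers resume where the previous peek stopped instead of re-reading from the head of the receive queue. A hedged sketch of how a datagram receive path might consult and advance it; the example_* functions are illustrative, not any specific protocol's code:

static int example_peek_offset(struct sock *sk, int flags)
{
        /* Resume point for MSG_PEEK readers, 0 otherwise. */
        return sk_peek_offset(sk, flags);
}

static void example_after_copy(struct sock *sk, int flags, int copied)
{
        if (flags & MSG_PEEK)
                sk_peek_offset_fwd(sk, copied); /* next peek resumes further in */
        else
                sk_peek_offset_bwd(sk, copied); /* data was consumed */
}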
/*
* Hashed lists helper routines
*/
@@ -374,40 +510,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk)
NULL;
}
-static inline int sk_unhashed(const struct sock *sk)
+static inline bool sk_unhashed(const struct sock *sk)
{
return hlist_unhashed(&sk->sk_node);
}
-static inline int sk_hashed(const struct sock *sk)
+static inline bool sk_hashed(const struct sock *sk)
{
return !sk_unhashed(sk);
}
-static __inline__ void sk_node_init(struct hlist_node *node)
+static inline void sk_node_init(struct hlist_node *node)
{
node->pprev = NULL;
}
-static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
node->pprev = NULL;
}
-static __inline__ void __sk_del_node(struct sock *sk)
+static inline void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->sk_node);
}
/* NB: equivalent to hlist_del_init_rcu */
-static __inline__ int __sk_del_node_init(struct sock *sk)
+static inline bool __sk_del_node_init(struct sock *sk)
{
if (sk_hashed(sk)) {
__sk_del_node(sk);
sk_node_init(&sk->sk_node);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* Grab socket reference count. This operation is valid only
@@ -429,9 +565,9 @@ static inline void __sock_put(struct sock *sk)
atomic_dec(&sk->sk_refcnt);
}
-static __inline__ int sk_del_node_init(struct sock *sk)
+static inline bool sk_del_node_init(struct sock *sk)
{
- int rc = __sk_del_node_init(sk);
+ bool rc = __sk_del_node_init(sk);
if (rc) {
/* paranoid for a while -acme */
@@ -442,18 +578,18 @@ static __inline__ int sk_del_node_init(struct sock *sk)
}
#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
-static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
if (sk_hashed(sk)) {
hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
- int rc = __sk_nulls_del_node_init_rcu(sk);
+ bool rc = __sk_nulls_del_node_init_rcu(sk);
if (rc) {
/* paranoid for a while -acme */
@@ -463,63 +599,71 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
return rc;
}
-static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
hlist_add_head(&sk->sk_node, list);
}
-static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
__sk_add_node(sk, list);
}
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
hlist_add_head_rcu(&sk->sk_node, list);
}
-static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
-static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
__sk_nulls_add_node_rcu(sk, list);
}
-static __inline__ void __sk_del_bind_node(struct sock *sk)
+static inline void __sk_del_bind_node(struct sock *sk)
{
__hlist_del(&sk->sk_bind_node);
}
-static __inline__ void sk_add_bind_node(struct sock *sk,
+static inline void sk_add_bind_node(struct sock *sk,
struct hlist_head *list)
{
hlist_add_head(&sk->sk_bind_node, list);
}
-#define sk_for_each(__sk, node, list) \
- hlist_for_each_entry(__sk, node, list, sk_node)
-#define sk_for_each_rcu(__sk, node, list) \
- hlist_for_each_entry_rcu(__sk, node, list, sk_node)
+#define sk_for_each(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_node)
+#define sk_for_each_rcu(__sk, list) \
+ hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
-#define sk_for_each_from(__sk, node) \
- if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
- hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_for_each_from(__sk) \
+ hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
-#define sk_for_each_safe(__sk, node, tmp, list) \
- hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
-#define sk_for_each_bound(__sk, node, list) \
- hlist_for_each_entry(__sk, node, list, sk_bind_node)
+#define sk_for_each_safe(__sk, tmp, list) \
+ hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
+#define sk_for_each_bound(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind_node)
+
+static inline struct user_namespace *sk_user_ns(struct sock *sk)
+{
+ /* Careful: only use this in a context where these pointers
+ * cannot change and are all valid, such as recvmsg from

+ * userspace.
+ */
+ return sk->sk_socket->file->f_cred->user_ns;
+}
/* Sock flags */
enum sock_flags {
@@ -538,6 +682,7 @@ enum sock_flags {
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
+ SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */
SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */
SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */
@@ -547,6 +692,14 @@ enum sock_flags {
SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
SOCK_FASYNC, /* fasync() active */
SOCK_RXQ_OVFL,
+ SOCK_ZEROCOPY, /* buffers from userspace */
+ SOCK_WIFI_STATUS, /* push wifi status to userspace */
+ SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
+ * Will use last 4 bytes of packet sent from
+ * user-space instead.
+ */
+ SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
+ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -564,11 +717,31 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
__clear_bit(flag, &sk->sk_flags);
}
-static inline int sock_flag(struct sock *sk, enum sock_flags flag)
+static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
return test_bit(flag, &sk->sk_flags);
}
+#ifdef CONFIG_NET
+extern struct static_key memalloc_socks;
+static inline int sk_memalloc_socks(void)
+{
+ return static_key_false(&memalloc_socks);
+}
+#else
+
+static inline int sk_memalloc_socks(void)
+{
+ return 0;
+}
+
+#endif
+
+static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+{
+ return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
+}
+
static inline void sk_acceptq_removed(struct sock *sk)
{
sk->sk_ack_backlog--;
@@ -579,7 +752,7 @@ static inline void sk_acceptq_added(struct sock *sk)
sk->sk_ack_backlog++;
}
-static inline int sk_acceptq_is_full(struct sock *sk)
+static inline bool sk_acceptq_is_full(const struct sock *sk)
{
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
@@ -587,22 +760,17 @@ static inline int sk_acceptq_is_full(struct sock *sk)
/*
* Compute minimal free write space needed to queue new packets.
*/
-static inline int sk_stream_min_wspace(struct sock *sk)
+static inline int sk_stream_min_wspace(const struct sock *sk)
{
return sk->sk_wmem_queued >> 1;
}
-static inline int sk_stream_wspace(struct sock *sk)
+static inline int sk_stream_wspace(const struct sock *sk)
{
return sk->sk_sndbuf - sk->sk_wmem_queued;
}
-extern void sk_stream_write_space(struct sock *sk);
-
-static inline int sk_stream_memory_free(struct sock *sk)
-{
- return sk->sk_wmem_queued < sk->sk_sndbuf;
-}
+void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -621,18 +789,22 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
/*
* Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can still be queued.
*/
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+ unsigned int limit)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
- return qsize + skb->truesize > sk->sk_rcvbuf;
+ return qsize > limit;
}
/* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+ unsigned int limit)
{
- if (sk_rcvqueues_full(sk, skb))
+ if (sk_rcvqueues_full(sk, skb, limit))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
@@ -640,45 +812,73 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
return 0;
}
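sk_add_backlog() now takes an explicit limit instead of always bounding the backlog by sk_rcvbuf plus this skb's truesize (the old check removed above); the caller decides how much combined receive-queue plus backlog memory it will tolerate. A hedged sketch of a protocol receive handler using it; the limit chosen here is illustrative:

static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
        int rc = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                rc = sk_backlog_rcv(sk, skb);
        } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
                kfree_skb(skb);
                return -ENOBUFS;
        }
        bh_unlock_sock(sk);
        return rc;
}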
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ return __sk_backlog_rcv(sk, skb);
+
return sk->sk_backlog_rcv(sk, skb);
}
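The pfmemalloc check above diverts packets allocated from emergency reserves to __sk_backlog_rcv(), which only SOCK_MEMALLOC sockets are entitled to consume. A hedged sketch of how a socket carrying memory-reclaim traffic (swap over the network) might opt in, using sk_set_memalloc()/sk_clear_memalloc() declared further down in this diff:

static void example_enable_memalloc(struct sock *sk)
{
        /* Sets SOCK_MEMALLOC; the memalloc_socks static key gates the fast path. */
        sk_set_memalloc(sk);
}

static void example_disable_memalloc(struct sock *sk)
{
        sk_clear_memalloc(sk);
}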
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
rcu_read_lock();
sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
#endif
}
-static inline void sock_rps_reset_flow(const struct sock *sk)
+static inline void sock_rps_reset_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
rcu_read_lock();
sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rps_reset_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
#endif
}
-static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_record_flow_hash(sk->sk_rxhash);
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_reset_flow_hash(sk->sk_rxhash);
+#endif
+}
+
+static inline void sock_rps_save_rxhash(struct sock *sk,
+ const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != rxhash)) {
+ if (unlikely(sk->sk_rxhash != skb->hash)) {
sock_rps_reset_flow(sk);
- sk->sk_rxhash = rxhash;
+ sk->sk_rxhash = skb->hash;
}
#endif
}
+static inline void sock_rps_reset_rxhash(struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_reset_flow(sk);
+ sk->sk_rxhash = 0;
+#endif
+}
+
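The RPS helpers are refactored so flow recording and reset can work from a bare hash as well as from a socket, and the rxhash is now taken from the skb rather than passed in by the caller. A hedged sketch of a receive path keeping the steering table up to date; example_note_rx is an illustrative name:

static void example_note_rx(struct sock *sk, const struct sk_buff *skb)
{
        sock_rps_save_rxhash(sk, skb);  /* update sk->sk_rxhash if it changed */
        sock_rps_record_flow(sk);       /* steer further packets of this flow here */
}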
#define sk_wait_event(__sk, __timeo, __condition) \
({ int __rc; \
release_sock(__sk); \
@@ -691,44 +891,59 @@ static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
__rc; \
})
-extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
-extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
-extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
-extern int sk_stream_error(struct sock *sk, int flags, int err);
-extern void sk_stream_kill_queues(struct sock *sk);
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
-extern int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
+struct module;
+
+/*
+ * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
+ * nodes unmodified. Special care is taken when initializing the object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+ if (offsetof(struct sock, sk_node.next) != 0)
+ memset(sk, 0, offsetof(struct sock, sk_node.next));
+ memset(&sk->sk_node.pprev, 0,
+ size - offsetof(struct sock, sk_node.pprev));
+}
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
* transport -> network interface is defined by struct inet_proto
*/
struct proto {
- void (*close)(struct sock *sk,
+ void (*close)(struct sock *sk,
long timeout);
int (*connect)(struct sock *sk,
- struct sockaddr *uaddr,
+ struct sockaddr *uaddr,
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
- struct sock * (*accept) (struct sock *sk, int flags, int *err);
+ struct sock * (*accept)(struct sock *sk, int flags, int *err);
int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);
int (*init)(struct sock *sk);
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
- int (*setsockopt)(struct sock *sk, int level,
+ int (*setsockopt)(struct sock *sk, int level,
int optname, char __user *optval,
unsigned int optlen);
- int (*getsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *option);
+ int (*getsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int __user *option);
#ifdef CONFIG_COMPAT
int (*compat_setsockopt)(struct sock *sk,
int level,
@@ -738,32 +953,39 @@ struct proto {
int level,
int optname, char __user *optval,
int __user *option);
+ int (*compat_ioctl)(struct sock *sk,
+ unsigned int cmd, unsigned long arg);
#endif
int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg,
- size_t len, int noblock, int flags,
- int *addr_len);
+ size_t len, int noblock, int flags,
+ int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
- int (*bind)(struct sock *sk,
+ int (*bind)(struct sock *sk,
struct sockaddr *uaddr, int addr_len);
- int (*backlog_rcv) (struct sock *sk,
+ int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
+ void (*release_cb)(struct sock *sk);
+ void (*mtu_reduced)(struct sock *sk);
+
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
+ void (*clear_sk)(struct sock *sk, int size);
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
unsigned int inuse_idx;
#endif
+ bool (*stream_memory_free)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
@@ -804,10 +1026,60 @@ struct proto {
#ifdef SOCK_REFCNT_DEBUG
atomic_t socks;
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ /*
+ * cgroup specific init/deinit functions. Called once for all
+ * protocols that implement it, from cgroups populate function.
+ * This function has to setup any files the protocol want to
+ * appear in the kmem cgroup filesystem.
+ */
+ int (*init_cgroup)(struct mem_cgroup *memcg,
+ struct cgroup_subsys *ss);
+ void (*destroy_cgroup)(struct mem_cgroup *memcg);
+ struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
+struct cg_proto {
+ struct res_counter memory_allocated; /* Current allocated memory. */
+ struct percpu_counter sockets_allocated; /* Current number of sockets. */
+ int memory_pressure;
+ long sysctl_mem[3];
+ unsigned long flags;
+ /*
+ * The memcg field is used to find which memcg we belong to directly.
+ * Each memcg struct can hold more than one cg_proto, so container_of
+ * won't really cut it.
+ *
+ * The elegant solution would be having an inverse function to
+ * proto_cgroup in struct proto, but that means polluting the structure
+ * for everybody, instead of just for memcg users.
+ */
+ struct mem_cgroup *memcg;
};
-extern int proto_register(struct proto *prot, int alloc_slab);
-extern void proto_unregister(struct proto *prot);
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
+
+static inline bool memcg_proto_active(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+}
+
+static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
+}
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
@@ -834,13 +1106,227 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
+extern struct static_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+ struct cg_proto *cg_proto)
+{
+ return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+ struct cg_proto *cg_proto)
+{
+ return NULL;
+}
+#endif
+
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+ return false;
+
+ return sk->sk_prot->stream_memory_free ?
+ sk->sk_prot->stream_memory_free(sk) : true;
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
+ sk_stream_memory_free(sk);
+}
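sk_stream_memory_free() is no longer a bare wmem_queued/sndbuf comparison: a protocol can veto writability through the new stream_memory_free hook, and sk_stream_is_writeable() combines that with the free-space heuristic. A hedged sketch of a poll path consulting it; real code also handles shutdown, errors and the rest of the mask:

#include <linux/poll.h>

static unsigned int example_poll_out(struct sock *sk)
{
        unsigned int mask = 0;

        if (sk_stream_is_writeable(sk))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}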
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+ if (!sk->sk_prot->memory_pressure)
+ return false;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return !!sk->sk_cgrp->memory_pressure;
+
+ return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+ int *memory_pressure = sk->sk_prot->memory_pressure;
+
+ if (!memory_pressure)
+ return;
+
+ if (*memory_pressure)
+ *memory_pressure = 0;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+ struct proto *prot = sk->sk_prot;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ cg_proto->memory_pressure = 0;
+ }
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+ if (!sk->sk_prot->enter_memory_pressure)
+ return;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+ struct proto *prot = sk->sk_prot;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ cg_proto->memory_pressure = 1;
+ }
+
+ sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+ long *prot = sk->sk_prot->sysctl_mem;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ prot = sk->sk_cgrp->sysctl_mem;
+ return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+ unsigned long amt,
+ int *parent_status)
+{
+ struct res_counter *fail;
+ int ret;
+
+ ret = res_counter_charge_nofail(&prot->memory_allocated,
+ amt << PAGE_SHIFT, &fail);
+ if (ret < 0)
+ *parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+ unsigned long amt)
+{
+ res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+ u64 ret;
+ ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
+ return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return memcg_memory_allocated_read(sk->sk_cgrp);
+
+ return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+ /* update the root cgroup regardless */
+ atomic_long_add_return(amt, prot->memory_allocated);
+ return memcg_memory_allocated_read(sk->sk_cgrp);
+ }
+
+ return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+ atomic_long_sub(amt, prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ percpu_counter_dec(&cg_proto->sockets_allocated);
+ }
+
+ percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ percpu_counter_inc(&cg_proto->sockets_allocated);
+ }
+
+ percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
+
+ return percpu_counter_read_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+ return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+ return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+ if (!prot->memory_pressure)
+ return false;
+ return !!*prot->memory_pressure;
+}
+
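Taken together, the helpers above let a protocol's charge path stay memcg-agnostic: charge pages, compare the result against sk_prot_mem_limits(), and toggle memory pressure around the soft and hard limits. A rough sketch of that pattern — a simplification of what __sk_mem_schedule() does, assuming the UNDER_LIMIT/OVER_LIMIT constants from the res_counter API, not the real function:

static bool sk_try_charge_pages(struct sock *sk, int pages)
{
	int parent_status = UNDER_LIMIT;
	long allocated = sk_memory_allocated_add(sk, pages, &parent_status);

	if (allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return true;
	}
	if (allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);
	if (allocated > sk_prot_mem_limits(sk, 2) ||
	    parent_status == OVER_LIMIT) {
		sk_memory_allocated_sub(sk, pages);
		return false;
	}
	return true;
}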
#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
int inc)
{
}
@@ -856,6 +1342,8 @@ static inline void __sk_prot_rehash(struct sock *sk)
sk->sk_prot->hash(sk);
}
+void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
+
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)
@@ -912,8 +1400,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
/*
* Functions for memory accounting
*/
-extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reclaim(struct sock *sk);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -925,26 +1413,28 @@ static inline int sk_mem_pages(int amt)
return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
-static inline int sk_has_account(struct sock *sk)
+static inline bool sk_has_account(struct sock *sk)
{
/* return true if protocol supports memory accounting */
return !!sk->sk_prot->memory_allocated;
}
-static inline int sk_wmem_schedule(struct sock *sk, int size)
+static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
if (!sk_has_account(sk))
- return 1;
+ return true;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, SK_MEM_SEND);
}
-static inline int sk_rmem_schedule(struct sock *sk, int size)
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
if (!sk_has_account(sk))
- return 1;
- return size <= sk->sk_forward_alloc ||
- __sk_mem_schedule(sk, size, SK_MEM_RECV);
+ return true;
+ return size <= sk->sk_forward_alloc ||
+ __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
+ skb_pfmemalloc(skb);
}
static inline void sk_mem_reclaim(struct sock *sk)
@@ -1000,6 +1490,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
+static inline void sock_release_ownership(struct sock *sk)
+{
+ sk->sk_lock.owned = 0;
+}
+
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -1007,7 +1502,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
* Mark both the sk_lock and the sk_lock.slock as a
* per-address-family lock class.
*/
-#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
@@ -1015,18 +1510,18 @@ do { \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
- (skey), (sname)); \
+ (skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
-extern void lock_sock_nested(struct sock *sk, int subclass);
+void lock_sock_nested(struct sock *sk, int subclass);
static inline void lock_sock(struct sock *sk)
{
lock_sock_nested(sk, 0);
}
-extern void release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
@@ -1035,7 +1530,7 @@ extern void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-extern bool lock_sock_fast(struct sock *sk);
+bool lock_sock_fast(struct sock *sk);
/**
* unlock_sock_fast - complement of lock_sock_fast
* @sk: socket
@@ -1053,138 +1548,80 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
}
-extern struct sock *sk_alloc(struct net *net, int family,
- gfp_t priority,
- struct proto *prot);
-extern void sk_free(struct sock *sk);
-extern void sk_release_kernel(struct sock *sk);
-extern struct sock *sk_clone(const struct sock *sk,
- const gfp_t priority);
-
-extern struct sk_buff *sock_wmalloc(struct sock *sk,
- unsigned long size, int force,
- gfp_t priority);
-extern struct sk_buff *sock_rmalloc(struct sock *sk,
- unsigned long size, int force,
- gfp_t priority);
-extern void sock_wfree(struct sk_buff *skb);
-extern void sock_rfree(struct sk_buff *skb);
-
-extern int sock_setsockopt(struct socket *sock, int level,
- int op, char __user *optval,
- unsigned int optlen);
-
-extern int sock_getsockopt(struct socket *sock, int level,
- int op, char __user *optval,
- int __user *optlen);
-extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
- unsigned long size,
- int noblock,
- int *errcode);
-extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
- unsigned long header_len,
- unsigned long data_len,
- int noblock,
- int *errcode);
-extern void *sock_kmalloc(struct sock *sk, int size,
- gfp_t priority);
-extern void sock_kfree_s(struct sock *sk, void *mem, int size);
-extern void sk_send_sigurg(struct sock *sk);
-
-#ifdef CONFIG_CGROUPS
-extern void sock_update_classid(struct sock *sk);
-#else
-static inline void sock_update_classid(struct sock *sk)
-{
-}
-#endif
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot);
+void sk_free(struct sock *sk);
+void sk_release_kernel(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority);
+void sock_wfree(struct sk_buff *skb);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_edemux(struct sk_buff *skb);
+
+int sock_setsockopt(struct socket *sock, int level, int op,
+ char __user *optval, unsigned int optlen);
+
+int sock_getsockopt(struct socket *sock, int level, int op,
+ char __user *optval, int __user *optlen);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+ int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ unsigned long data_len, int noblock,
+ int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
/*
* Functions to fill in entries in struct proto_ops when a protocol
* does not implement a particular function.
*/
-extern int sock_no_bind(struct socket *,
- struct sockaddr *, int);
-extern int sock_no_connect(struct socket *,
- struct sockaddr *, int, int);
-extern int sock_no_socketpair(struct socket *,
- struct socket *);
-extern int sock_no_accept(struct socket *,
- struct socket *, int);
-extern int sock_no_getname(struct socket *,
- struct sockaddr *, int *, int);
-extern unsigned int sock_no_poll(struct file *, struct socket *,
- struct poll_table_struct *);
-extern int sock_no_ioctl(struct socket *, unsigned int,
- unsigned long);
-extern int sock_no_listen(struct socket *, int);
-extern int sock_no_shutdown(struct socket *, int);
-extern int sock_no_getsockopt(struct socket *, int , int,
- char __user *, int __user *);
-extern int sock_no_setsockopt(struct socket *, int, int,
- char __user *, unsigned int);
-extern int sock_no_sendmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t);
-extern int sock_no_recvmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t, int);
-extern int sock_no_mmap(struct file *file,
- struct socket *sock,
- struct vm_area_struct *vma);
-extern ssize_t sock_no_sendpage(struct socket *sock,
- struct page *page,
- int offset, size_t size,
- int flags);
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
+unsigned int sock_no_poll(struct file *, struct socket *,
+ struct poll_table_struct *);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+ int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
/*
* Functions to fill in entries in struct proto_ops when a protocol
* uses the inet style.
*/
-extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
-extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags);
-extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
-extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+int compat_sock_common_getsockopt(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
-extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+int compat_sock_common_setsockopt(struct socket *sock, int level,
int optname, char __user *optval, unsigned int optlen);
-extern void sk_common_release(struct sock *sk);
+void sk_common_release(struct sock *sk);
/*
* Default socket callbacks and setup code
*/
-
-/* Initialise core socket variables */
-extern void sock_init_data(struct socket *sock, struct sock *sk);
-
-/**
- * sk_filter_release - release a socket filter
- * @fp: filter to remove
- *
- * Remove a filter from a socket and release its resources.
- */
-
-static inline void sk_filter_release(struct sk_filter *fp)
-{
- if (atomic_dec_and_test(&fp->refcnt))
- kfree(fp);
-}
-
-static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
-{
- unsigned int size = sk_filter_len(fp);
-
- atomic_sub(size, &sk->sk_omem_alloc);
- sk_filter_release(fp);
-}
-static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
- atomic_inc(&fp->refcnt);
- atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
-}
+/* Initialise core socket variables */
+void sock_init_data(struct socket *sock, struct sock *sk);
/*
* Socket reference counting postulates.
@@ -1217,9 +1654,12 @@ static inline void sock_put(struct sock *sk)
if (atomic_dec_and_test(&sk->sk_refcnt))
sk_free(sk);
}
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
- const int nested);
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
@@ -1244,7 +1684,8 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
- return &sk->sk_wq->wait;
+ BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+ return &rcu_dereference_raw(sk->sk_wq)->wait;
}
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
@@ -1265,21 +1706,20 @@ static inline void sock_orphan(struct sock *sk)
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
write_lock_bh(&sk->sk_callback_lock);
- rcu_assign_pointer(sk->sk_wq, parent->wq);
+ sk->sk_wq = parent->wq;
parent->sk = sk;
sk_set_socket(sk, parent);
security_sock_graft(sk, parent);
write_unlock_bh(&sk->sk_callback_lock);
}
-extern int sock_i_uid(struct sock *sk);
-extern unsigned long sock_i_ino(struct sock *sk);
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
- sock_owned_by_user(sk) ||
+ return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
lockdep_is_held(&sk->sk_lock.slock));
}
@@ -1290,14 +1730,12 @@ sk_dst_get(struct sock *sk)
rcu_read_lock();
dst = rcu_dereference(sk->sk_dst_cache);
- if (dst)
- dst_hold(dst);
+ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+ dst = NULL;
rcu_read_unlock();
return dst;
}
-extern void sk_reset_txq(struct sock *sk);
-
static inline void dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1307,7 +1745,7 @@ static inline void dst_negative_advice(struct sock *sk)
if (ndst != dst) {
rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_reset_txq(sk);
+ sk_tx_queue_clear(sk);
}
}
}
@@ -1330,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- spin_lock(&sk->sk_dst_lock);
- __sk_dst_set(sk, dst);
- spin_unlock(&sk->sk_dst_lock);
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
}
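
The lockless sk_dst_set() relies on xchg() to both publish the new entry and hand back the old one for release, while readers pair with it through sk_dst_get()'s atomic_inc_not_zero(). An illustrative pairing, not taken from any real caller:

static void sk_dst_example(struct sock *sk, struct dst_entry *new_dst)
{
	struct dst_entry *dst;

	/* Writer: xchg() publishes new_dst and returns the old entry,
	 * which is released without holding any lock.
	 */
	sk_dst_set(sk, new_dst);

	/* Reader: take a reference only if the entry is still live; a
	 * concurrent writer may already have swapped it out.
	 */
	dst = sk_dst_get(sk);
	if (dst) {
		/* ... use dst ... */
		dst_release(dst);
	}
}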
static inline void
@@ -1344,28 +1784,79 @@ __sk_dst_reset(struct sock *sk)
static inline void
sk_dst_reset(struct sock *sk)
{
- spin_lock(&sk->sk_dst_lock);
- __sk_dst_reset(sk);
- spin_unlock(&sk->sk_dst_lock);
+ sk_dst_set(sk, NULL);
}
-extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
-extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
-static inline int sk_can_gso(const struct sock *sk)
+static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}
-extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
-static inline void sk_nocaps_add(struct sock *sk, int flags)
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
sk->sk_route_nocaps |= flags;
sk->sk_route_caps &= ~flags;
}
+static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+ char __user *from, char *to,
+ int copy, int offset)
+{
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ int err = 0;
+ __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
+ if (err)
+ return err;
+ skb->csum = csum_block_add(skb->csum, csum, offset);
+ } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
+ if (!access_ok(VERIFY_READ, from, copy) ||
+ __copy_from_user_nocache(to, from, copy))
+ return -EFAULT;
+ } else if (copy_from_user(to, from, copy))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
+ char __user *from, int copy)
+{
+ int err, offset = skb->len;
+
+ err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
+ copy, offset);
+ if (err)
+ __skb_trim(skb, offset);
+
+ return err;
+}
+
+static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+ struct sk_buff *skb,
+ struct page *page,
+ int off, int copy)
+{
+ int err;
+
+ err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
+ copy, skb->len);
+ if (err)
+ return err;
+
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk->sk_wmem_queued += copy;
+ sk_mem_charge(sk, copy);
+ return 0;
+}
+
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
struct sk_buff *skb, struct page *page,
int off, int copy)
@@ -1417,7 +1908,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk)
*
* Returns true if socket has write or read allocations
*/
-static inline int sk_has_allocations(const struct sock *sk)
+static inline bool sk_has_allocations(const struct sock *sk)
{
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
@@ -1456,9 +1947,7 @@ static inline int sk_has_allocations(const struct sock *sk)
*/
static inline bool wq_has_sleeper(struct socket_wq *wq)
{
-
- /*
- * We need to be sure we are in sync with the
+ /* We need to be sure we are in sync with the
* add_wait_queue modifications to the wait queue.
*
* This memory barrier is paired in the sock_poll_wait.
@@ -1478,24 +1967,23 @@ static inline bool wq_has_sleeper(struct socket_wq *wq)
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
{
- if (p && wait_address) {
+ if (!poll_does_not_wait(p) && wait_address) {
poll_wait(filp, wait_address, p);
- /*
- * We need to be sure we are in sync with the
+ /* We need to be sure we are in sync with the
* socket flags modification.
*
* This memory barrier is paired in the wq_has_sleeper.
- */
+ */
smp_mb();
}
}
/*
- * Queue a received datagram if it will fit. Stream and sequenced
+ * Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in
* and play with them.
*
- * Inlined as it's very short and called for pretty much every
+ * Inlined as it's very short and called for pretty much every
* packet ever received.
*/
@@ -1521,19 +2009,19 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
- unsigned long expires);
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+ unsigned long expires);
-extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
-extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
/*
* Recover an error report and clear atomically
*/
-
+
static inline int sock_error(struct sock *sk)
{
int err;
@@ -1549,7 +2037,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
- if (amt < 0)
+ if (amt < 0)
amt = 0;
}
return amt;
@@ -1561,39 +2049,47 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
sock_wake_async(sk->sk_socket, how, band);
}
-#define SOCK_MIN_SNDBUF 2048
-/*
- * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
- * sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak
+/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
+ * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
+ * Note: for send buffers, TCP works better if we can build two skbs at
+ * minimum.
*/
-#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))
+#define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
+
+#define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2)
+#define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
- sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+ sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
}
}
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
-static inline struct page *sk_stream_alloc_page(struct sock *sk)
+/**
+ * sk_page_frag - return an appropriate page_frag
+ * @sk: socket
+ *
+ * If the socket allocation mode allows the current thread to sleep, it is
+ * safe to use the per-task page_frag instead of the per-socket one.
+ */
+static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- struct page *page = NULL;
+ if (sk->sk_allocation & __GFP_WAIT)
+ return &current->task_frag;
- page = alloc_pages(sk->sk_allocation, 0);
- if (!page) {
- sk->sk_prot->enter_memory_pressure(sk);
- sk_stream_moderate_sndbuf(sk);
- }
- return page;
+ return &sk->sk_frag;
}
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+
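A typical consumer refills the fragment and then copies into it at the current offset; roughly, a simplified sketch of the pattern tcp_sendmsg() follows (the helper name is hypothetical, and real callers also attach the page to an skb and charge the socket):

static int sk_copy_into_frag(struct sock *sk, char __user *from, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;		/* caller would wait for memory */

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_from_user(page_address(pfrag->page) + pfrag->offset,
			   from, copy))
		return -EFAULT;

	pfrag->offset += copy;
	return copy;
}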
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
-static inline int sock_writeable(const struct sock *sk)
+static inline bool sock_writeable(const struct sock *sk)
{
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}
@@ -1603,12 +2099,12 @@ static inline gfp_t gfp_any(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
-static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
+static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
}
-static inline long sock_sndtimeo(const struct sock *sk, int noblock)
+static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_sndtimeo;
}
@@ -1626,10 +2122,12 @@ static inline int sock_intr_errno(long timeo)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
-extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
-static __inline__ void
+static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
ktime_t kt = skb->tstamp;
@@ -1655,19 +2153,21 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
__sock_recv_timestamp(msg, sk, skb);
else
sk->sk_stamp = kt;
+
+ if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+ __sock_recv_wifi_status(msg, sk, skb);
}
-extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
(1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
(1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
- (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
+ (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
(1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
if (sk->sk_flags & FLAGS_TS_OR_DROPS)
@@ -1681,10 +2181,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
* @sk: socket sending this packet
* @tx_flags: filled with instructions for time stamping
*
- * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
- * parameters are invalid.
+ * Currently only depends on SOCK_TIMESTAMPING* flags.
*/
-extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
/**
* sk_eat_skb - Release a skb if it is no longer needed
@@ -1696,7 +2195,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
* locked so that the sk_buff queue operation is ok.
*/
#ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
if (!copied_early)
@@ -1705,7 +2204,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
@@ -1726,19 +2225,23 @@ void sock_net_set(struct sock *sk, struct net *net)
/*
* Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a referrence to a namespace in order to allow
+ * They should not hold a reference to a namespace in order to allow
* to stop it.
* Sockets after sk_change_net should be released using sk_release_kernel
*/
static inline void sk_change_net(struct sock *sk, struct net *net)
{
- put_net(sock_net(sk));
- sock_net_set(sk, hold_net(net));
+ struct net *current_net = sock_net(sk);
+
+ if (!net_eq(current_net, net)) {
+ put_net(current_net);
+ sock_net_set(sk, hold_net(net));
+ }
}
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
- if (unlikely(skb->sk)) {
+ if (skb->sk) {
struct sock *sk = skb->sk;
skb->destructor = NULL;
@@ -1748,12 +2251,19 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
-extern void sock_enable_timestamp(struct sock *sk, int flag);
-extern int sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int sock_get_timestampns(struct sock *, struct timespec __user *);
+void sock_enable_timestamp(struct sock *sk, int flag);
+int sock_get_timestamp(struct sock *, struct timeval __user *);
+int sock_get_timestampns(struct sock *, struct timespec __user *);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+ int type);
+
+bool sk_ns_capable(const struct sock *sk,
+ struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
-/*
- * Enable debug/info messages
+/*
+ * Enable debug/info messages
*/
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
@@ -1765,8 +2275,6 @@ extern int net_msg_warn;
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
-extern void sk_init(void);
-
extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
diff --git a/include/net/stp.h b/include/net/stp.h
index ad447f10541..3af174d70d9 100644
--- a/include/net/stp.h
+++ b/include/net/stp.h
@@ -8,7 +8,7 @@ struct stp_proto {
void *data;
};
-extern int stp_proto_register(const struct stp_proto *proto);
-extern void stp_proto_unregister(const struct stp_proto *proto);
+int stp_proto_register(const struct stp_proto *proto);
+void stp_proto_unregister(const struct stp_proto *proto);
#endif /* _NET_STP_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9e8710be7a0..fa8f5fac65e 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -9,7 +9,7 @@ struct tcf_csum {
u32 update_flags;
};
-#define to_tcf_csum(pc) \
- container_of(pc,struct tcf_csum,common)
+#define to_tcf_csum(a) \
+ container_of(a->priv,struct tcf_csum,common)
#endif /* __NET_TC_CSUM_H */
diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h
index 65f024b8095..9763dcbb9bc 100644
--- a/include/net/tc_act/tc_defact.h
+++ b/include/net/tc_act/tc_defact.h
@@ -8,7 +8,7 @@ struct tcf_defact {
u32 tcfd_datalen;
void *tcfd_defdata;
};
-#define to_defact(pc) \
- container_of(pc, struct tcf_defact, common)
+#define to_defact(a) \
+ container_of(a->priv, struct tcf_defact, common)
#endif /* __NET_TC_DEF_H */
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9e3f6767b80..9fc9b578908 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -11,7 +11,7 @@ struct tcf_gact {
int tcfg_paction;
#endif
};
-#define to_gact(pc) \
- container_of(pc, struct tcf_gact, common)
+#define to_gact(a) \
+ container_of(a->priv, struct tcf_gact, common)
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
index f7d25dfcc4b..c0f4193f432 100644
--- a/include/net/tc_act/tc_ipt.h
+++ b/include/net/tc_act/tc_ipt.h
@@ -11,7 +11,7 @@ struct tcf_ipt {
char *tcfi_tname;
struct xt_entry_target *tcfi_t;
};
-#define to_ipt(pc) \
- container_of(pc, struct tcf_ipt, common)
+#define to_ipt(a) \
+ container_of(a->priv, struct tcf_ipt, common)
#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index cfe2943690f..4dd77a1c106 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -11,7 +11,7 @@ struct tcf_mirred {
struct net_device *tcfm_dev;
struct list_head tcfm_list;
};
-#define to_mirred(pc) \
- container_of(pc, struct tcf_mirred, common)
+#define to_mirred(a) \
+ container_of(a->priv, struct tcf_mirred, common)
#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index 4a691f34d70..63d8e9ca9d9 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -13,9 +13,9 @@ struct tcf_nat {
u32 flags;
};
-static inline struct tcf_nat *to_tcf_nat(struct tcf_common *pc)
+static inline struct tcf_nat *to_tcf_nat(struct tc_action *a)
{
- return container_of(pc, struct tcf_nat, common);
+ return container_of(a->priv, struct tcf_nat, common);
}
#endif /* __NET_TC_NAT_H */
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index e6f6e15956f..5b80998879c 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -9,7 +9,7 @@ struct tcf_pedit {
unsigned char tcfp_flags;
struct tc_pedit_key *tcfp_keys;
};
-#define to_pedit(pc) \
- container_of(pc, struct tcf_pedit, common)
+#define to_pedit(a) \
+ container_of(a->priv, struct tcf_pedit, common)
#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index e103fe02f37..0df9a0db4a8 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
@@ -30,7 +29,7 @@ struct tcf_skbedit {
u16 queue_mapping;
/* XXX: 16-bit pad here? */
};
-#define to_skbedit(pc) \
- container_of(pc, struct tcf_skbedit, common)
+#define to_skbedit(a) \
+ container_of(a->priv, struct tcf_skbedit, common)
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3f227baee4b..7286db80e8b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -18,11 +18,11 @@
#ifndef _TCP_H
#define _TCP_H
-#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
#include <linux/tcp.h>
+#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
@@ -31,6 +31,7 @@
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
+#include <linux/ktime.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -45,11 +46,12 @@
#include <net/dst.h>
#include <linux/seq_file.h>
+#include <linux/memcontrol.h>
extern struct inet_hashinfo tcp_hashinfo;
extern struct percpu_counter tcp_orphan_count;
-extern void tcp_time_wait(struct sock *sk, int state, int timeo);
+void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
@@ -94,11 +96,21 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
* 15 is ~13-30min depending on RTO.
*/
-#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
- * connection: ~180sec is RFC minimum */
+#define TCP_SYN_RETRIES 6 /* This is how many retries are done
+ * when active opening a connection.
+ * RFC1122 says the minimum retry MUST
+ * be at least 180secs. Nevertheless
+ * this value corresponds to
+ * 63 seconds of retransmission with
+ * the current initial RTO.
+ */
-#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
- * connection: ~180sec is RFC minimum */
+#define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
+ * when passive opening a connection.
+ * This corresponds to 31 seconds of
+ * retransmission with the current
+ * initial RTO.
+ */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
* state, about 60 seconds */
@@ -119,7 +131,13 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
-#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
+#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
+ * used as a fallback RTO for the
+ * initial data transmission if no
+ * valid RTT sample has been acquired,
+ * most likely due to retrans in 3WHS.
+ */
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
@@ -159,7 +177,11 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
-#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */
+#define TCPOPT_EXP 254 /* Experimental */
+/* Magic number to be after the option value for sharing TCP
+ * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
+ */
+#define TCPOPT_FASTOPEN_MAGIC 0xF989
/*
* TCP option lengths
@@ -170,10 +192,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
-#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
-#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
-#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
-#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
+#define TCPOLEN_EXP_FASTOPEN_BASE 4
/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
@@ -193,6 +212,23 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+#define TCP_INIT_CWND 10
+
+/* Bit Flags for sysctl_tcp_fastopen */
+#define TFO_CLIENT_ENABLE 1
+#define TFO_SERVER_ENABLE 2
+#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
+
+/* Accept SYN data w/o any cookie option */
+#define TFO_SERVER_COOKIE_NOT_REQD 0x200
+
+/* Force enable TFO on all listeners, i.e., not requiring the
+ * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ */
+#define TFO_SERVER_WO_SOCKOPT1 0x400
+#define TFO_SERVER_WO_SOCKOPT2 0x800
+
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
@@ -209,6 +245,7 @@ extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
+extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
@@ -216,7 +253,6 @@ extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
-extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
@@ -225,21 +261,23 @@ extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
-extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
-extern int sysctl_tcp_max_ssthresh;
-extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
+extern int sysctl_tcp_early_retrans;
+extern int sysctl_tcp_limit_output_bytes;
+extern int sysctl_tcp_challenge_ack_limit;
+extern unsigned int sysctl_tcp_notsent_lowat;
+extern int sysctl_tcp_min_tso_segs;
+extern int sysctl_tcp_autocorking;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -250,18 +288,26 @@ extern int tcp_memory_pressure;
* and worry about wraparound (automatic with unsigned arithmetic).
*/
-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
{
return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)
/* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
return seq3 - seq2 >= seq1 - seq2;
}
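
Because the comparison is done on the signed 32-bit difference, it stays correct across sequence-number wraparound: (__s32)(0xfffffff0 - 0x10) is negative, so before(0xfffffff0, 0x10) is true. A throwaway self-check, shown only to illustrate the wrap behaviour (not part of the header):

static inline void tcp_seq_wrap_selftest(void)
{
	__u32 a = 0xfffffff0U;	/* 16 bytes before the 32-bit wrap */
	__u32 b = 0x00000010U;	/* 16 bytes after it */

	/* b is 0x20 bytes ahead of a even though it is numerically smaller */
	WARN_ON(!before(a, b));
	WARN_ON(!after(b, a));
	WARN_ON(!between(b, a, a + 0x100));
}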
+static inline bool tcp_out_of_memory(struct sock *sk)
+{
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
+ return true;
+ return false;
+}
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -272,13 +318,11 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
if (orphans << shift > sysctl_tcp_max_orphans)
return true;
}
-
- if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
- return true;
return false;
}
+bool tcp_check_oom(struct sock *sk, int shift);
+
/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
@@ -286,10 +330,10 @@ static inline void tcp_synq_overflow(struct sock *sk)
}
/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
+ return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}
extern struct proto tcp_prot;
@@ -300,31 +344,36 @@ extern struct proto tcp_prot;
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
-extern void tcp_v4_err(struct sk_buff *skb, u32);
-
-extern void tcp_shutdown (struct sock *sk, int how);
-
-extern int tcp_v4_rcv(struct sk_buff *skb);
-
-extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
-extern void *tcp_v4_tw_get_peer(struct sock *sk);
-extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
-extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
-extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
-extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
-extern void tcp_rcv_space_adjust(struct sock *sk);
-extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
-extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
-extern void tcp_twsk_destructor(struct sock *sk);
-extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags);
+void tcp_tasklet_init(void);
+
+void tcp_v4_err(struct sk_buff *skb, u32);
+
+void tcp_shutdown(struct sock *sk, int how);
+
+void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_rcv(struct sk_buff *skb);
+
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t size);
+int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+ int flags);
+void tcp_release_cb(struct sock *sk);
+void tcp_wfree(struct sk_buff *skb);
+void tcp_write_timer_handler(struct sock *sk);
+void tcp_delack_timer_handler(struct sock *sk);
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len);
+void tcp_rcv_space_adjust(struct sock *sk);
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+void tcp_twsk_destructor(struct sock *sk);
+ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
@@ -344,13 +393,7 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
-
-static __inline__ void
-TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
-{
- if (sysctl_tcp_ecn && th->ece && th->cwr)
- inet_rsk(req)->ecn_ok = 1;
-}
+#define TCP_ECN_SEEN 8
enum tcp_tw_status {
TCP_TW_SUCCESS = 0,
@@ -360,110 +403,168 @@ enum tcp_tw_status {
};
-extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
- struct sk_buff *skb,
- const struct tcphdr *th);
-extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
- struct request_sock *req,
- struct request_sock **prev);
-extern int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
-extern void tcp_enter_frto(struct sock *sk);
-extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_sock *tp);
-extern void tcp_update_metrics(struct sock *sk);
-extern void tcp_close(struct sock *sk, long timeout);
-extern unsigned int tcp_poll(struct file * file, struct socket *sock,
- struct poll_table_struct *wait);
-extern int tcp_getsockopt(struct sock *sk, int level, int optname,
+enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
+ struct sk_buff *skb,
+ const struct tcphdr *th);
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req, struct request_sock **prev,
+ bool fastopen);
+int tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
+void tcp_enter_loss(struct sock *sk, int how);
+void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_metrics(struct sock *sk);
+void tcp_init_metrics(struct sock *sk);
+void tcp_metrics_init(void);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
+ bool paws_check);
+bool tcp_remember_stamp(struct sock *sk);
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+void tcp_disable_fack(struct tcp_sock *tp);
+void tcp_close(struct sock *sk, long timeout);
+void tcp_init_sock(struct sock *sk);
+unsigned int tcp_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+int tcp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-extern int tcp_setsockopt(struct sock *sk, int level, int optname,
+int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
-extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-extern void tcp_set_keepalive(struct sock *sk, int val);
-extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_options_received *opt_rx, u8 **hvpp,
- int estab);
-extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
+void tcp_set_keepalive(struct sock *sk, int val);
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int nonblock, int flags, int *addr_len);
+void tcp_parse_options(const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx,
+ int estab, struct tcp_fastopen_cookie *foc);
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* TCP v4 functions exported for the inet6 API
*/
-extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-extern struct sock * tcp_create_openreq_child(struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
-extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst);
-extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
- int addr_len);
-extern int tcp_connect(struct sock *sk);
-extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- struct request_sock *req,
- struct request_values *rvp);
-extern int tcp_disconnect(struct sock *sk, int flags);
-
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_create_openreq_child(struct sock *sk,
+ struct request_sock *req,
+ struct sk_buff *skb);
+struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst);
+int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_connect(struct sock *sk);
+struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcp_fastopen_cookie *foc);
+int tcp_disconnect(struct sock *sk, int flags);
+
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
+int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
- struct ip_options *opt);
-extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
- __u16 *mss);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ struct ip_options *opt);
+#ifdef CONFIG_SYN_COOKIES
+
+/* Syncookies use a monotonic timer which increments every 60 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value. A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+#define MAX_SYNCOOKIE_AGE 2
-extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
+static inline u32 tcp_cookie_time(void)
+{
+ u64 val = get_jiffies_64();
-/* From net/ipv6/syncookies.c */
-extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
-extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
- __u16 *mss);
+ do_div(val, 60 * HZ);
+ return val;
+}
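
On the validation side, the counter recovered from a received cookie is compared against the current tcp_cookie_time(); a hedged sketch of that age check, assuming the counter has already been extracted (cookie_count is an illustrative name, not a real helper):

static inline bool cookie_counter_valid(u32 cookie_count)
{
	u32 diff = tcp_cookie_time() - cookie_count;

	/* valid for at most MAX_SYNCOOKIE_AGE minute-ticks (2 * 60 s here) */
	return diff < MAX_SYNCOOKIE_AGE;
}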
+
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+ u16 *mssp);
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
+#else
+static inline __u32 cookie_v4_init_sequence(struct sock *sk,
+ struct sk_buff *skb,
+ __u16 *mss)
+{
+ return 0;
+}
+#endif
+__u32 cookie_init_timestamp(struct request_sock *req);
+bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
+ bool *ecn_ok);
+
+/* From net/ipv6/syncookies.c */
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ u32 cookie);
+struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, u16 *mssp);
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
+ __u16 *mss);
+#else
+static inline __u32 cookie_v6_init_sequence(struct sock *sk,
+ struct sk_buff *skb,
+ __u16 *mss)
+{
+ return 0;
+}
+#endif
/* tcp_output.c */
-extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
- int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
-extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_retransmit_timer(struct sock *sk);
-extern void tcp_xmit_retransmit_queue(struct sock *);
-extern void tcp_simple_retransmit(struct sock *);
-extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
-
-extern void tcp_send_probe0(struct sock *);
-extern void tcp_send_partial(struct sock *);
-extern int tcp_write_wakeup(struct sock *);
-extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
-extern int tcp_send_synack(struct sock *);
-extern void tcp_push_one(struct sock *, unsigned int mss_now);
-extern void tcp_send_ack(struct sock *sk);
-extern void tcp_send_delayed_ack(struct sock *sk);
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+ int nonagle);
+bool tcp_may_send_now(struct sock *sk);
+int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+void tcp_retransmit_timer(struct sock *sk);
+void tcp_xmit_retransmit_queue(struct sock *);
+void tcp_simple_retransmit(struct sock *);
+int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
+
+void tcp_send_probe0(struct sock *);
+void tcp_send_partial(struct sock *);
+int tcp_write_wakeup(struct sock *);
+void tcp_send_fin(struct sock *sk);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+int tcp_send_synack(struct sock *);
+bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
+ const char *proto);
+void tcp_push_one(struct sock *, unsigned int mss_now);
+void tcp_send_ack(struct sock *sk);
+void tcp_send_delayed_ack(struct sock *sk);
+void tcp_send_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk);
/* tcp_input.c */
-extern void tcp_cwnd_application_limited(struct sock *sk);
+void tcp_resume_early_retransmit(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+void tcp_reset(struct sock *sk);
/* tcp_timer.c */
-extern void tcp_init_xmit_timers(struct sock *);
+void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
inet_csk_clear_xmit_timers(sk);
}
-extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk);
+unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int tcp_current_mss(struct sock *sk);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -489,19 +590,20 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-extern void tcp_get_info(struct sock *, struct tcp_info *);
+void tcp_get_info(const struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
-extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor);
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
-extern void tcp_initialize_rcv_mss(struct sock *sk);
+void tcp_initialize_rcv_mss(struct sock *sk);
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
-extern void tcp_mtup_init(struct sock *sk);
+int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+int tcp_mss_to_mtu(struct sock *sk, int mss);
+void tcp_mtup_init(struct sock *sk);
+void tcp_init_buffer_space(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
{
@@ -511,7 +613,7 @@ static inline void tcp_bound_rto(const struct sock *sk)
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
- return (tp->srtt >> 3) + tp->rttvar;
+ return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
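For orientation, a hedged worked example of the new microsecond form (assuming the usual kernel fixed-point scaling, in which srtt_us holds 8 times the smoothed RTT and rttvar_us roughly 4 times the RTT variance; the patch itself does not restate that):

/* Illustration only, under the assumed scaling above:
 *   SRTT = 25 ms   ->  srtt_us   = 200000
 *   RTTVAR = 5 ms  ->  rttvar_us =  20000
 *   (200000 >> 3) + 20000 = 45000 usec = SRTT + 4*RTTVAR = 45 ms,
 * which usecs_to_jiffies() then converts to jiffies for icsk_rto,
 * matching the RFC 6298 retransmission timeout formula.
 */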
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -540,7 +642,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = TCP_RTO_MIN;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
@@ -548,6 +650,11 @@ static inline u32 tcp_rto_min(struct sock *sk)
return rto_min;
}
+static inline u32 tcp_rto_min_us(struct sock *sk)
+{
+ return jiffies_to_usecs(tcp_rto_min(sk));
+}
+
/* Compute the actual receive window we are currently advertising.
* Rcv_nxt can be after the window if our peer push more data
* than the offered window.
@@ -565,7 +672,9 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp)
* scaling applied to the result. The caller does these things
* if necessary. This is a "raw" window selection.
*/
-extern u32 __tcp_select_window(struct sock *sk);
+u32 __tcp_select_window(struct sock *sk);
+
+void tcp_send_window_probe(struct sock *sk);
/* TCP timestamps are only 32-bits, this causes a slight
* complication on 64-bit systems since we store a snapshot
@@ -595,28 +704,47 @@ extern u32 __tcp_select_window(struct sock *sk);
struct tcp_skb_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header; /* For incoming frames */
__u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
__u32 when; /* used to compute rtt's */
- __u8 flags; /* TCP header flags. */
+ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
+
__u8 sacked; /* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
-
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
+ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+ /* 1 byte hole */
__u32 ack_seq; /* Sequence number ACK'd */
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
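As a quick reminder of how the control block above is consumed, a minimal hypothetical sketch (not taken from this patch) of a sender stamping the renamed tcp_flags field through the cb[] alias:

/* Hypothetical call site: stamp an outgoing skb via the TCP_SKB_CB() alias.
 * 'copy' stands for however many payload bytes were just queued. */
TCP_SKB_CB(skb)->seq       = tp->write_seq;
TCP_SKB_CB(skb)->end_seq   = tp->write_seq + copy;
TCP_SKB_CB(skb)->tcp_flags = TCPHDR_ACK;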
+/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
+ *
+ * If we receive a SYN packet with these bits set, it means a network is
+ * playing bad games with TOS bits. In order to avoid possible false congestion
+ * notifications, we disable TCP ECN negotiation.
+ */
+static inline void
+TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
+ struct net *net)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+
+ if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
+ INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
+ inet_rsk(req)->ecn_ok = 1;
+}
+
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
@@ -636,7 +764,6 @@ enum tcp_ca_event {
CA_EVENT_TX_START, /* first transmit when no packets in flight */
CA_EVENT_CWND_RESTART, /* congestion window restart */
CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
- CA_EVENT_FRTO, /* fast recovery timeout */
CA_EVENT_LOSS, /* loss timeout */
CA_EVENT_FAST_ACK, /* in sequence ack */
CA_EVENT_SLOW_ACK, /* other ack */
@@ -650,7 +777,6 @@ enum tcp_ca_event {
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
#define TCP_CONG_NON_RESTRICTED 0x1
-#define TCP_CONG_RTT_STAMP 0x2
struct tcp_congestion_ops {
struct list_head list;
@@ -663,10 +789,8 @@ struct tcp_congestion_ops {
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
- /* lower bound for congestion window (optional) */
- u32 (*min_cwnd)(const struct sock *sk);
/* do new cwnd calculation (required) */
- void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
+ void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
/* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */
@@ -682,24 +806,23 @@ struct tcp_congestion_ops {
struct module *owner;
};
-extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_init_congestion_control(struct sock *sk);
-extern void tcp_cleanup_congestion_control(struct sock *sk);
-extern int tcp_set_default_congestion_control(const char *name);
-extern void tcp_get_default_congestion_control(char *name);
-extern void tcp_get_available_congestion_control(char *buf, size_t len);
-extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
-extern int tcp_set_allowed_congestion_control(char *allowed);
-extern int tcp_set_congestion_control(struct sock *sk, const char *name);
-extern void tcp_slow_start(struct tcp_sock *tp);
-extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+void tcp_init_congestion_control(struct sock *sk);
+void tcp_cleanup_congestion_control(struct sock *sk);
+int tcp_set_default_congestion_control(const char *name);
+void tcp_get_default_congestion_control(char *name);
+void tcp_get_available_congestion_control(char *buf, size_t len);
+void tcp_get_allowed_congestion_control(char *buf, size_t len);
+int tcp_set_allowed_congestion_control(char *allowed);
+int tcp_set_congestion_control(struct sock *sk, const char *name);
+int tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
extern struct tcp_congestion_ops tcp_init_congestion_ops;
-extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
-extern u32 tcp_reno_min_cwnd(const struct sock *sk);
+u32 tcp_reno_ssthresh(struct sock *sk);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -732,19 +855,34 @@ static inline int tcp_is_sack(const struct tcp_sock *tp)
return tp->rx_opt.sack_ok;
}
-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
return !tcp_is_sack(tp);
}
-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok & 2;
+ return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}
static inline void tcp_enable_fack(struct tcp_sock *tp)
{
- tp->rx_opt.sack_ok |= 2;
+ tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
+}
+
+/* TCP early-retransmit (ER) is similar to but more conservative than
+ * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
+ */
+static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
+{
+ tp->do_early_retrans = sysctl_tcp_early_retrans &&
+ sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
+ sysctl_tcp_reordering == 3;
+}
+
+static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
+{
+ tp->do_early_retrans = 0;
}
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
@@ -778,14 +916,21 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
+static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
+{
+ return (TCPF_CA_CWR | TCPF_CA_Recovery) &
+ (1 << inet_csk(sk)->icsk_ca_state);
+}
+
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
- * The exception is rate halving phase, when cwnd is decreasing towards
+ * The exception is cwnd reduction phase, when cwnd is decreasing towards
* ssthresh.
*/
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
+
+ if (tcp_in_cwnd_reduction(sk))
return tp->snd_ssthresh;
else
return max(tp->snd_ssthresh,
@@ -796,18 +941,17 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending if not using sysctl_tcp_tso_win_divisor.
*/
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
- return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+ return 3;
}
-extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
-
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as
* the default reordering threshold - but if reordering increases,
@@ -824,18 +968,34 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
return tp->snd_una + tp->snd_wnd;
}
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
-static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
- const struct sk_buff *skb)
+/* We follow the spirit of RFC2861 to validate cwnd but implement a more
+ * flexible approach. The RFC suggests cwnd should not be raised unless
+ * it was fully used previously. And that's exactly what we do in
+ * congestion avoidance mode. But in slow start we allow cwnd to grow
+ * as long as the application has used half the cwnd.
+ * Example :
+ * cwnd is 10 (IW10), but the application sends only 9 frames.
+ * We allow cwnd to reach 18 when all frames are ACKed.
+ * This check is safe because it's as aggressive as slow start, which already
+ * risks 100% overshoot. The advantage is that we discourage applications from
+ * sending extra filler packets or data just to artificially blow up cwnd
+ * usage, and we allow application-limited processes to probe bandwidth more
+ * aggressively.
+ */
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
- if (skb->len < mss)
- tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* If in slow start, ensure cwnd grows to twice what was ACKed. */
+ if (tp->snd_cwnd <= tp->snd_ssthresh)
+ return tp->snd_cwnd < 2 * tp->max_packets_out;
+
+ return tp->is_cwnd_limited;
}
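To make the intended use of this helper and of the reworked (ack, acked) prototypes above concrete, a minimal congestion-avoidance sketch; the hook name is hypothetical, but the calls match the declarations in this header:

/* Illustrative cong_avoid hook: grow cwnd only while actually cwnd-limited. */
static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);		/* exponential growth */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);	/* additive increase */
}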
static inline void tcp_check_probe_timer(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out && !icsk->icsk_pending)
@@ -867,7 +1027,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
return __skb_checksum_complete(skb);
}
-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
__tcp_checksum_complete(skb);
@@ -889,46 +1049,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
#endif
}
-/* Packet is added to VJ-style prequeue for processing in process
- * context, if a reader task is waiting. Apparently, this exciting
- * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
- * failed somewhere. Latency? Burstiness? Well, at least now we will
- * see, why it failed. 8)8) --ANK
- *
- * NOTE: is this not too big to inline?
- */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (sysctl_tcp_low_latency || !tp->ucopy.task)
- return 0;
-
- __skb_queue_tail(&tp->ucopy.prequeue, skb);
- tp->ucopy.memory += skb->truesize;
- if (tp->ucopy.memory > sk->sk_rcvbuf) {
- struct sk_buff *skb1;
-
- BUG_ON(sock_owned_by_user(sk));
-
- while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
- sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(sock_net(sk),
- LINUX_MIB_TCPPREQUEUEDROPPED);
- }
-
- tp->ucopy.memory = 0;
- } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible_sync_poll(sk_sleep(sk),
- POLLIN | POLLRDNORM | POLLRDBAND);
- if (!inet_csk_ack_scheduled(sk))
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- (3 * tcp_rto_min(sk)) / 4,
- TCP_RTO_MAX);
- }
- return 1;
-}
-
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
@@ -939,9 +1060,9 @@ static const char *statename[]={
"Close Wait","Last ACK","Listen","Closing"
};
#endif
-extern void tcp_set_state(struct sock *sk, int state);
+void tcp_set_state(struct sock *sk, int state);
-extern void tcp_done(struct sock *sk);
+void tcp_done(struct sock *sk);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
@@ -949,11 +1070,12 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
rx_opt->num_sacks = 0;
}
+u32 tcp_default_init_rwnd(u32 mss);
+
/* Determine a window scaling and initial window to offer. */
-extern void tcp_select_initial_window(int __space, __u32 mss,
- __u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale,
- __u32 init_rcv_wnd);
+void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+ __u32 *window_clamp, int wscale_ok,
+ __u8 *rcv_wscale, __u32 init_rcv_wnd);
static inline int tcp_win_from_space(int space)
{
@@ -983,6 +1105,8 @@ static inline void tcp_openreq_init(struct request_sock *req,
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
req->cookie_ts = 0;
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+ tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tcp_rsk(req)->snt_synack = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -991,11 +1115,14 @@ static inline void tcp_openreq_init(struct request_sock *req,
ireq->wscale_ok = rx_opt->wscale_ok;
ireq->acked = 0;
ireq->ecn_ok = 0;
- ireq->rmt_port = tcp_hdr(skb)->source;
- ireq->loc_port = tcp_hdr(skb)->dest;
+ ireq->ir_rmt_port = tcp_hdr(skb)->source;
+ ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
}
-extern void tcp_enter_memory_pressure(struct sock *sk);
+extern void tcp_openreq_init_rwin(struct request_sock *req,
+ struct sock *sk, struct dst_entry *dst);
+
+void tcp_enter_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
@@ -1031,22 +1158,28 @@ static inline int tcp_fin_time(const struct sock *sk)
return fin_timeout;
}
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
- int paws_win)
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+ int paws_win)
{
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
- return 1;
+ return true;
if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
- return 1;
-
- return 0;
+ return true;
+ /*
+ * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
+ * while later TCP messages carry valid values. Ignore a 0 value,
+ * or else a 'negative' tsval might prevent us from accepting their packets.
+ */
+ if (!rx_opt->ts_recent)
+ return true;
+ return false;
}
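A worked example of the wrap-safe comparison above (straight arithmetic, nothing assumed beyond the code):

/* ts_recent = 0xFFFFFFF0, rcv_tsval = 0x00000010 (timestamp clock wrapped):
 * (s32)(0xFFFFFFF0 - 0x00000010) = (s32)0xFFFFFFE0 = -32 <= paws_win,
 * so the segment still passes the PAWS check despite the 32-bit wrap. */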
-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
- int rst)
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+ int rst)
{
if (tcp_paws_check(rx_opt, 0))
- return 0;
+ return false;
/* RST segments are not recommended to carry timestamp,
and, if they do, it is recommended to ignore PAWS because
@@ -1061,12 +1194,10 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
However, we can relax time bounds for RST segments to MSL.
*/
if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
- return 0;
- return 1;
+ return false;
+ return true;
}
-#define TCP_CHECK_TIMER(sk) do { } while (0)
-
static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
@@ -1080,7 +1211,6 @@ static inline void tcp_mib_init(struct net *net)
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
tp->lost_skb_hint = NULL;
- tp->scoreboard_skb_hint = NULL;
}
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
@@ -1092,35 +1222,27 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
/* MD5 Signature */
struct crypto_hash;
+union tcp_md5_addr {
+ struct in_addr a4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr a6;
+#endif
+};
+
/* - key database */
struct tcp_md5sig_key {
- u8 *key;
+ struct hlist_node node;
u8 keylen;
-};
-
-struct tcp4_md5sig_key {
- struct tcp_md5sig_key base;
- __be32 addr;
-};
-
-struct tcp6_md5sig_key {
- struct tcp_md5sig_key base;
-#if 0
- u32 scope_id; /* XXX */
-#endif
- struct in6_addr addr;
+ u8 family; /* AF_INET or AF_INET6 */
+ union tcp_md5_addr addr;
+ u8 key[TCP_MD5SIG_MAXKEYLEN];
+ struct rcu_head rcu;
};
/* - sock block */
struct tcp_md5sig_info {
- struct tcp4_md5sig_key *keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct tcp6_md5sig_key *keys6;
- u32 entries6;
- u32 alloced6;
-#endif
- u32 entries4;
- u32 alloced4;
+ struct hlist_head head;
+ struct rcu_head rcu;
};
/* - pseudo header */
@@ -1141,7 +1263,7 @@ struct tcp6_pseudohdr {
union tcp_md5sum_block {
struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct tcp6_pseudohdr ip6;
#endif
};
@@ -1152,39 +1274,76 @@ struct tcp_md5sig_pool {
union tcp_md5sum_block md5_blk;
};
-#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */
-
/* - functions */
-extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb);
-extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
- struct sock *addr_sk);
-extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
- u8 newkeylen);
-extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb);
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
+ int family);
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+ struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? \
- &(struct tcp_md5sig_key) { \
- .key = (twsk)->tw_md5_key, \
- .keylen = (twsk)->tw_md5_keylen, \
- } : NULL)
+struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family);
+#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
+static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
+{
+ return NULL;
+}
#define tcp_twsk_md5_key(twsk) NULL
#endif
-extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
-extern void tcp_free_md5sig_pool(void);
+bool tcp_alloc_md5sig_pool(void);
-extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
-extern void tcp_put_md5sig_pool(void);
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+static inline void tcp_put_md5sig_pool(void)
+{
+ local_bh_enable();
+}
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
- unsigned header_len);
-extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
- struct tcp_md5sig_key *key);
+int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
+ unsigned int header_len);
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+ const struct tcp_md5sig_key *key);
+
+/* From tcp_fastopen.c */
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie, int *syn_loss,
+ unsigned long *last_syn_loss);
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost);
+struct tcp_fastopen_request {
+ /* Fast Open cookie. Size 0 means a cookie request */
+ struct tcp_fastopen_cookie cookie;
+ struct msghdr *data; /* data in MSG_FASTOPEN */
+ size_t size;
+ int copied; /* queued in tcp_connect() */
+};
+void tcp_free_fastopen_req(struct tcp_sock *tp);
+
+extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+ struct dst_entry *dst);
+void tcp_fastopen_init_key_once(bool publish);
+#define TCP_FASTOPEN_KEY_LENGTH 16
+
+/* Fastopen key context */
+struct tcp_fastopen_context {
+ struct crypto_cipher *tfm;
+ __u8 key[TCP_FASTOPEN_KEY_LENGTH];
+ struct rcu_head rcu;
+};
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
@@ -1197,22 +1356,24 @@ static inline void tcp_write_queue_purge(struct sock *sk)
tcp_clear_all_retrans_hints(tcp_sk(sk));
}
-static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_next(&sk->sk_write_queue, skb);
}
-static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_prev(&sk->sk_write_queue, skb);
}
@@ -1226,7 +1387,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
-static inline struct sk_buff *tcp_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
return sk->sk_send_head;
}
@@ -1237,7 +1398,7 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
-static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_is_last(sk, skb))
sk->sk_send_head = NULL;
@@ -1303,7 +1464,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
__skb_unlink(skb, &sk->sk_write_queue);
}
-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
{
return skb_queue_empty(&sk->sk_write_queue);
}
@@ -1317,8 +1478,9 @@ static inline void tcp_push_pending_frames(struct sock *sk)
}
}
-/* Start sequence of the highest skb with SACKed bit, valid only if
- * sacked > 0 or when the caller has ensured validity by itself.
+/* Start sequence of the skb just after the highest skb with SACKed
+ * bit, valid only if sacked_out > 0 or when the caller has ensured
+ * validity by itself.
*/
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
@@ -1359,7 +1521,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
/* Determines whether this is a thin stream (which may suffer from
* increased latency). Used to trigger latency-reducing mechanisms.
*/
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
@@ -1369,14 +1531,15 @@ enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
TCP_SEQ_STATE_OPENREQ,
TCP_SEQ_STATE_ESTABLISHED,
- TCP_SEQ_STATE_TIME_WAIT,
};
+int tcp_seq_open(struct inode *inode, struct file *file);
+
struct tcp_seq_afinfo {
- char *name;
- sa_family_t family;
- struct file_operations seq_fops;
- struct seq_operations seq_ops;
+ char *name;
+ sa_family_t family;
+ const struct file_operations *seq_fops;
+ struct seq_operations seq_ops;
};
struct tcp_iter_state {
@@ -1384,30 +1547,42 @@ struct tcp_iter_state {
sa_family_t family;
enum tcp_seq_states state;
struct sock *syn_wait_sk;
- int bucket, offset, sbucket, num, uid;
+ int bucket, offset, sbucket, num;
+ kuid_t uid;
loff_t last_pos;
};
-extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
-extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
+int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
+void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;
-extern void tcp_v4_destroy_sock(struct sock *sk);
+void tcp_v4_destroy_sock(struct sock *sk);
+
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int tcp_gro_complete(struct sk_buff *skb);
+
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
+
+static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+{
+ return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+}
+
+static inline bool tcp_stream_memory_free(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
-extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
-extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
-extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
-extern int tcp_gro_complete(struct sk_buff *skb);
-extern int tcp4_gro_complete(struct sk_buff *skb);
+ return notsent_bytes < tcp_notsent_lowat(tp);
+}
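Since tcp_notsent_lowat() falls back to the sysctl only when the per-socket value is zero, a short userspace-facing sketch (hedged: this assumes the standard TCP_NOTSENT_LOWAT socket option is what fills tp->notsent_lowat) of bounding unsent data per connection:

/* Userspace sketch (assumption): keep at most 128 KB of not-yet-sent data
 * queued on this socket before it stops being reported as writable. */
int lowat = 128 * 1024;

if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat)) < 0)
	perror("setsockopt(TCP_NOTSENT_LOWAT)");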
#ifdef CONFIG_PROC_FS
-extern int tcp4_proc_init(void);
-extern void tcp4_proc_exit(void);
+int tcp4_proc_init(void);
+void tcp4_proc_exit(void);
#endif
/* TCP af-specific functions */
@@ -1417,13 +1592,9 @@ struct tcp_sock_af_ops {
struct sock *addr_sk);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
- int (*md5_add) (struct sock *sk,
- struct sock *addr_sk,
- u8 *newkey,
- u8 len);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
int (*md5_parse) (struct sock *sk,
char __user *optval,
int optlen);
@@ -1436,98 +1607,15 @@ struct tcp_request_sock_ops {
struct request_sock *req);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
#endif
};
-/* Using SHA1 for now, define some constants.
- */
-#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
-#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
-#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
-
-extern int tcp_cookie_generator(u32 *bakery);
-
-/**
- * struct tcp_cookie_values - each socket needs extra space for the
- * cookies, together with (optional) space for any SYN data.
- *
- * A tcp_sock contains a pointer to the current value, and this is
- * cloned to the tcp_timewait_sock.
- *
- * @cookie_pair: variable data from the option exchange.
- *
- * @cookie_desired: user specified tcpct_cookie_desired. Zero
- * indicates default (sysctl_tcp_cookie_size).
- * After cookie sent, remembers size of cookie.
- * Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
- *
- * @s_data_desired: user specified tcpct_s_data_desired. When the
- * constant payload is specified (@s_data_constant),
- * holds its length instead.
- * Range 0 to TCP_MSS_DESIRED.
- *
- * @s_data_payload: constant data that is to be included in the
- * payload of SYN or SYNACK segments when the
- * cookie option is present.
- */
-struct tcp_cookie_values {
- struct kref kref;
- u8 cookie_pair[TCP_COOKIE_PAIR_SIZE];
- u8 cookie_pair_size;
- u8 cookie_desired;
- u16 s_data_desired:11,
- s_data_constant:1,
- s_data_in:1,
- s_data_out:1,
- s_data_unused:2;
- u8 s_data_payload[0];
-};
-
-static inline void tcp_cookie_values_release(struct kref *kref)
-{
- kfree(container_of(kref, struct tcp_cookie_values, kref));
-}
-
-/* The length of constant payload data. Note that s_data_desired is
- * overloaded, depending on s_data_constant: either the length of constant
- * data (returned here) or the limit on variable data.
- */
-static inline int tcp_s_data_size(const struct tcp_sock *tp)
-{
- return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
- ? tp->cookie_values->s_data_desired
- : 0;
-}
-
-/**
- * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
- *
- * As tcp_request_sock has already been extended in other places, the
- * only remaining method is to pass stack values along as function
- * parameters. These parameters are not needed after sending SYNACK.
- *
- * @cookie_bakery: cryptographic secret and message workspace.
- *
- * @cookie_plus: bytes in authenticator/cookie option, copied from
- * struct tcp_options_received (above).
- */
-struct tcp_extend_values {
- struct request_values rv;
- u32 cookie_bakery[COOKIE_WORKSPACE_WORDS];
- u8 cookie_plus:6,
- cookie_out_never:1,
- cookie_in_always:1;
-};
-
-static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
-{
- return (struct tcp_extend_values *)rvp;
-}
+int tcpv4_offload_init(void);
-extern void tcp_v4_init(void);
-extern void tcp_init(void);
+void tcp_v4_init(void);
+void tcp_init(void);
#endif /* _TCP_H */
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644
index 00000000000..05b94d9453d
--- /dev/null
+++ b/include/net/tcp_memcontrol.h
@@ -0,0 +1,7 @@
+#ifndef _TCP_MEMCG_H
+#define _TCP_MEMCG_H
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
+int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct mem_cgroup *memcg);
+#endif /* _TCP_MEMCG_H */
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 053b3cf2c66..68f0ecad6c6 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -12,6 +12,7 @@
#define _TIMEWAIT_SOCK_H
#include <linux/slab.h>
+#include <linux/bug.h>
#include <net/sock.h>
struct timewait_sock_ops {
@@ -21,7 +22,6 @@ struct timewait_sock_ops {
int (*twsk_unique)(struct sock *sk,
struct sock *sktw, void *twp);
void (*twsk_destructor)(struct sock *sk);
- void *(*twsk_getpeer)(struct sock *sk);
};
static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -40,11 +40,4 @@ static inline void twsk_destructor(struct sock *sk)
sk->sk_prot->twsk_prot->twsk_destructor(sk);
}
-static inline void *twsk_getpeer(struct sock *sk)
-{
- if (sk->sk_prot->twsk_prot->twsk_getpeer)
- return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
- return NULL;
-}
-
#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 42a0eb68b7b..b927413dde8 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -3,59 +3,62 @@
#include <net/checksum.h>
-/*
- * IPv6 transport protocols
- */
-
-#ifdef __KERNEL__
-
+/* IPv6 transport protocols */
extern struct proto rawv6_prot;
extern struct proto udpv6_prot;
extern struct proto udplitev6_prot;
extern struct proto tcpv6_prot;
+extern struct proto pingv6_prot;
-struct flowi;
+struct flowi6;
-/* extention headers */
-extern int ipv6_exthdrs_init(void);
-extern void ipv6_exthdrs_exit(void);
-extern int ipv6_frag_init(void);
-extern void ipv6_frag_exit(void);
+/* extension headers */
+int ipv6_exthdrs_init(void);
+void ipv6_exthdrs_exit(void);
+int ipv6_frag_init(void);
+void ipv6_frag_exit(void);
/* transport protocols */
-extern int rawv6_init(void);
-extern void rawv6_exit(void);
-extern int udpv6_init(void);
-extern void udpv6_exit(void);
-extern int udplitev6_init(void);
-extern void udplitev6_exit(void);
-extern int tcpv6_init(void);
-extern void tcpv6_exit(void);
-
-extern int udpv6_connect(struct sock *sk,
- struct sockaddr *uaddr,
- int addr_len);
-
-extern int datagram_recv_ctl(struct sock *sk,
- struct msghdr *msg,
- struct sk_buff *skb);
-
-extern int datagram_send_ctl(struct net *net,
- struct msghdr *msg,
- struct flowi *fl,
- struct ipv6_txoptions *opt,
- int *hlimit, int *tclass,
- int *dontfrag);
-
-#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
-
-/*
- * address family specific functions
- */
+int pingv6_init(void);
+void pingv6_exit(void);
+int rawv6_init(void);
+void rawv6_exit(void);
+int udpv6_init(void);
+void udpv6_exit(void);
+int udplitev6_init(void);
+void udplitev6_exit(void);
+int tcpv6_init(void);
+void tcpv6_exit(void);
+
+int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+/* this does all the common and the specific ctl work */
+void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb);
+
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
+ struct flowi6 *fl6, struct ipv6_txoptions *opt,
+ int *hlimit, int *tclass, int *dontfrag);
+
+void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+ __u16 srcp, __u16 destp, int bucket);
+
+#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+
+/* address family specific functions */
extern const struct inet_connection_sock_af_ops ipv4_specific;
-extern void inet6_destroy_sock(struct sock *sk);
+void inet6_destroy_sock(struct sock *sk);
-#endif
+#define IPV6_SEQ_DGRAM_HEADER \
+ " sl " \
+ "local_address " \
+ "remote_address " \
+ "st tx_queue rx_queue tr tm->when retrnsmt" \
+ " uid timeout inode ref pointer drops\n"
#endif
diff --git a/include/net/tso.h b/include/net/tso.h
new file mode 100644
index 00000000000..47e5444f7d1
--- /dev/null
+++ b/include/net/tso.h
@@ -0,0 +1,20 @@
+#ifndef _TSO_H
+#define _TSO_H
+
+#include <net/ip.h>
+
+struct tso_t {
+ int next_frag_idx;
+ void *data;
+ size_t size;
+ u16 ip_id;
+ u32 tcp_seq;
+};
+
+int tso_count_descs(struct sk_buff *skb);
+void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+ int size, bool is_last);
+void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
+void tso_start(struct sk_buff *skb, struct tso_t *tso);
+
+#endif /* _TSO_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index bb967dd59bf..68a1fefe3df 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -23,6 +23,7 @@
#define _UDP_H
#include <linux/list.h>
+#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
@@ -41,7 +42,7 @@
struct udp_skb_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header;
@@ -78,9 +79,9 @@ struct udp_table {
unsigned int log;
};
extern struct udp_table udp_table;
-extern void udp_table_init(struct udp_table *, const char *);
+void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
- struct net *net, unsigned num)
+ struct net *net, unsigned int num)
{
return &table->hash[udp_hashfn(net, num, table->mask)];
}
@@ -94,15 +95,6 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
return &table->hash2[hash & table->mask];
}
-/* Note: this must match 'valbool' in sock_setsockopt */
-#define UDP_CSUM_NOXMIT 1
-
-/* Used by SunRPC/xprt layer. */
-#define UDP_CSUM_NORCV 2
-
-/* Default, as per the RFC, is to always do csums. */
-#define UDP_CSUM_DEFAULT 0
-
extern struct proto udp_prot;
extern atomic_long_t udp_memory_allocated;
@@ -119,7 +111,9 @@ struct sk_buff;
*/
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
- return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov);
+ return (UDP_SKB_CB(skb)->cscov == skb->len ?
+ __skb_checksum_complete(skb) :
+ __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}
static inline int udp_lib_checksum_complete(struct sk_buff *skb)
@@ -144,48 +138,79 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+static inline __wsum udp_csum(struct sk_buff *skb)
+{
+ __wsum csum = csum_partial(skb_transport_header(skb),
+ sizeof(struct udphdr), skb->csum);
+
+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+ csum = csum_add(csum, skb->csum);
+ }
+ return csum;
+}
+
+static inline __sum16 udp_v4_check(int len, __be32 saddr,
+ __be32 daddr, __wsum base)
+{
+ return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
+}
+
+void udp_set_csum(bool nocheck, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, int len);
+
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline void udp_lib_hash(struct sock *sk)
{
BUG();
}
-extern void udp_lib_unhash(struct sock *sk);
-extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_unhash(struct sock *sk);
+void udp_lib_rehash(struct sock *sk, u16 new_hash);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
-extern int udp_lib_get_port(struct sock *sk, unsigned short snum,
- int (*)(const struct sock *,const struct sock *),
- unsigned int hash2_nulladdr);
+int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ int (*)(const struct sock *, const struct sock *),
+ unsigned int hash2_nulladdr);
/* net/ipv4/udp.c */
-extern int udp_get_port(struct sock *sk, unsigned short snum,
- int (*saddr_cmp)(const struct sock *,
- const struct sock *));
-extern void udp_err(struct sk_buff *, u32);
-extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
-extern void udp_flush_pending_frames(struct sock *sk);
-extern int udp_rcv(struct sk_buff *skb);
-extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int udp_disconnect(struct sock *sk, int flags);
-extern unsigned int udp_poll(struct file *file, struct socket *sock,
- poll_table *wait);
-extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen,
- int (*push_pending_frames)(struct sock *));
-extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport,
- int dif);
-extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif);
+void udp_v4_early_demux(struct sk_buff *skb);
+int udp_get_port(struct sock *sk, unsigned short snum,
+ int (*saddr_cmp)(const struct sock *,
+ const struct sock *));
+void udp_err(struct sk_buff *, u32);
+int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len);
+int udp_push_pending_frames(struct sock *sk);
+void udp_flush_pending_frames(struct sock *sk);
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
+int udp_rcv(struct sk_buff *skb);
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_disconnect(struct sock *sk, int flags);
+unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ netdev_features_t features);
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen,
+ int (*push_pending_frames)(struct sock *));
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif);
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif,
+ struct udp_table *tbl);
+struct sock *udp6_lib_lookup(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif);
+struct sock *__udp6_lib_lookup(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, struct udp_table *tbl);
/*
* SNMP statistics for UDP and UDP-Lite
@@ -206,25 +231,27 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd
else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
} while(0)
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define UDPX_INC_STATS_BH(sk, field) \
- do { \
- if ((sk)->sk_family == AF_INET) \
- UDP_INC_STATS_BH(sock_net(sk), field, 0); \
- else \
- UDP6_INC_STATS_BH(sock_net(sk), field, 0); \
- } while (0);
+#if IS_ENABLED(CONFIG_IPV6)
+#define UDPX_INC_STATS_BH(sk, field) \
+do { \
+ if ((sk)->sk_family == AF_INET) \
+ UDP_INC_STATS_BH(sock_net(sk), field, 0); \
+ else \
+ UDP6_INC_STATS_BH(sock_net(sk), field, 0); \
+} while (0)
#else
#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
#endif
/* /proc */
+int udp_seq_open(struct inode *inode, struct file *file);
+
struct udp_seq_afinfo {
- char *name;
- sa_family_t family;
- struct udp_table *udp_table;
- struct file_operations seq_fops;
- struct seq_operations seq_ops;
+ char *name;
+ sa_family_t family;
+ struct udp_table *udp_table;
+ const struct file_operations *seq_fops;
+ struct seq_operations seq_ops;
};
struct udp_iter_state {
@@ -235,15 +262,19 @@ struct udp_iter_state {
};
#ifdef CONFIG_PROC_FS
-extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
-extern void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
+int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
+void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
-extern int udp4_proc_init(void);
-extern void udp4_proc_exit(void);
+int udp4_proc_init(void);
+void udp4_proc_exit(void);
#endif
-extern void udp_init(void);
+int udpv4_offload_init(void);
-extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+void udp_init(void);
+
+void udp_encap_enable(void);
+#if IS_ENABLED(CONFIG_IPV6)
+void udpv6_encap_enable(void);
+#endif
#endif /* _UDP_H */
diff --git a/include/net/udplite.h b/include/net/udplite.h
index afdffe607b2..2caadabcd07 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -40,7 +40,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
* checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
* with a zero checksum field are illegal. */
if (uh->check == 0) {
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: zeroed checksum field\n");
+ LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n");
return 1;
}
@@ -52,7 +52,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
/*
* Coverage length violates RFC 3828: log and discard silently.
*/
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: bad csum coverage %d/%d\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n",
cscov, skb->len);
return 1;
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+/* Slow-path computation of checksum. Socket is locked. */
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
+ const struct udp_sock *up = udp_sk(skb->sk);
int cscov = up->len;
+ __wsum csum = 0;
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket
- */
- if (up->pcflag & UDPLITE_SEND_CC) {
+ if (up->pcflag & UDPLITE_SEND_CC) {
+ /*
+ * Sender has set `partial coverage' option on UDP-Lite socket.
+ * The special case "up->pcslen == 0" signifies full coverage.
+ */
if (up->pcslen < up->len) {
- /* up->pcslen == 0 means that full coverage is required,
- * partial coverage only if 0 < up->pcslen < up->len */
- if (0 < up->pcslen) {
- cscov = up->pcslen;
- }
- uh->len = htons(up->pcslen);
+ if (0 < up->pcslen)
+ cscov = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
}
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
+ /*
+ * NOTE: Causes for the error case `up->pcslen > up->len':
+ * (i) Application error (will not be penalized).
+ * (ii) Payload too big for send buffer: data is split
+ * into several packets, each with its own header.
+ * In this case (e.g. last segment), coverage may
+ * exceed packet length.
+ * Since packets with coverage length > packet length are
+ * illegal, we fall back to the defaults here.
+ */
}
- return cscov;
-}
-
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
- __wsum csum = 0;
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -115,7 +109,24 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
-extern void udplite4_register(void);
-extern int udplite_get_port(struct sock *sk, unsigned short snum,
- int (*scmp)(const struct sock *, const struct sock *));
+/* Fast-path computation of checksum. Socket may not be locked. */
+static inline __wsum udplite_csum(struct sk_buff *skb)
+{
+ const struct udp_sock *up = udp_sk(skb->sk);
+ const int off = skb_transport_offset(skb);
+ int len = skb->len - off;
+
+ if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+ if (0 < up->pcslen)
+ len = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
+ }
+ skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
+
+ return skb_checksum(skb, off, len, 0);
+}
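A brief usage note for the partial-coverage branch above, as a userspace sketch (hedged: the option name and level are the standard UDP-Lite socket options, assumed rather than introduced by this patch):

/* Userspace sketch (assumption): checksum only the first 20 bytes of each
 * UDP-Lite datagram (8-byte header plus 12 bytes of payload); the kernel
 * side then takes the up->pcslen branch in the helpers above. */
int cov = 20;

if (setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov)) < 0)
	perror("setsockopt(UDPLITE_SEND_CSCOV)");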
+
+void udplite4_register(void);
+int udplite_get_port(struct sock *sk, unsigned short snum,
+ int (*scmp)(const struct sock *, const struct sock *));
#endif /* _UDPLITE_H */
diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h
new file mode 100644
index 00000000000..9ccd5316eac
--- /dev/null
+++ b/include/net/vsock_addr.h
@@ -0,0 +1,30 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _VSOCK_ADDR_H_
+#define _VSOCK_ADDR_H_
+
+#include <linux/vm_sockets.h>
+
+void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
+int vsock_addr_validate(const struct sockaddr_vm *addr);
+bool vsock_addr_bound(const struct sockaddr_vm *addr);
+void vsock_addr_unbind(struct sockaddr_vm *addr);
+bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
+ const struct sockaddr_vm *other);
+int vsock_addr_cast(const struct sockaddr *addr, size_t len,
+ struct sockaddr_vm **out_addr);
+
+#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
new file mode 100644
index 00000000000..12196ce661d
--- /dev/null
+++ b/include/net/vxlan.h
@@ -0,0 +1,62 @@
+#ifndef __NET_VXLAN_H
+#define __NET_VXLAN_H 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+
+struct vxlan_sock;
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+
+/* per UDP socket information */
+struct vxlan_sock {
+ struct hlist_node hlist;
+ vxlan_rcv_t *rcv;
+ void *data;
+ struct work_struct del_work;
+ struct socket *sock;
+ struct rcu_head rcu;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+ atomic_t refcnt;
+ struct udp_offload udp_offloads;
+};
+
+#define VXLAN_F_LEARN 0x01
+#define VXLAN_F_PROXY 0x02
+#define VXLAN_F_RSC 0x04
+#define VXLAN_F_L2MISS 0x08
+#define VXLAN_F_L3MISS 0x10
+#define VXLAN_F_IPV6 0x20
+#define VXLAN_F_UDP_CSUM 0x40
+#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
+#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
+
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, u32 flags);
+
+void vxlan_sock_release(struct vxlan_sock *vs);
+
+int vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
+
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
+
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
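Spelled out, the two defines above work out to:

/* VXLAN_HEADROOM  = 20 + 8 + 8 + 14 = 50 bytes (IPv4 encapsulation)
 * VXLAN6_HEADROOM = 40 + 8 + 8 + 14 = 70 bytes (IPv6 encapsulation),
 * i.e. the outer-header room a device reserves in front of the inner frame. */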
+
+#if IS_ENABLED(CONFIG_VXLAN)
+void vxlan_get_rx_port(struct net_device *netdev);
+#else
+static inline void vxlan_get_rx_port(struct net_device *netdev)
+{
+}
+#endif
+#endif
diff --git a/include/net/wext.h b/include/net/wext.h
index 4f6e7423174..345911965db 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,13 +6,13 @@
struct net;
#ifdef CONFIG_WEXT_CORE
-extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
- void __user *arg);
-extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
- unsigned long arg);
+int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+ void __user *arg);
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+ unsigned long arg);
-extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
-extern int call_commit_handler(struct net_device *dev);
+struct iw_statistics *get_wireless_stats(struct net_device *dev);
+int call_commit_handler(struct net_device *dev);
#else
static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
void __user *arg)
@@ -27,8 +27,8 @@ static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
#endif
#ifdef CONFIG_WEXT_PROC
-extern int wext_proc_init(struct net *net);
-extern void wext_proc_exit(struct net *net);
+int wext_proc_init(struct net *net);
+void wext_proc_exit(struct net *net);
#else
static inline int wext_proc_init(struct net *net)
{
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 3461aa1df1e..e52ef5357e0 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -250,7 +250,6 @@
#ifndef __NET__WIMAX_H__
#define __NET__WIMAX_H__
-#ifdef __KERNEL__
#include <linux/wimax.h>
#include <net/genetlink.h>
@@ -286,7 +285,7 @@ struct wimax_dev;
* does not disconnect the device from the bus and return 0.
* If that fails, it should resort to some sort of cold or bus
* reset (even if it implies a bus disconnection and device
- * dissapearance). In that case, -ENODEV should be returned to
+ * disappearance). In that case, -ENODEV should be returned to
* indicate the device is gone.
* This operation has to be synchronous, and return only when the
* reset is complete. In case of having had to resort to bus/cold
@@ -424,9 +423,8 @@ struct wimax_dev {
int (*op_reset)(struct wimax_dev *wimax_dev);
struct rfkill *rfkill;
- struct input_dev *rfkill_input;
- unsigned rf_hw;
- unsigned rf_sw;
+ unsigned int rf_hw;
+ unsigned int rf_sw;
char name[32];
struct dentry *debugfs_dentry;
@@ -440,9 +438,9 @@ struct wimax_dev {
*
* These functions are not exported to user space.
*/
-extern void wimax_dev_init(struct wimax_dev *);
-extern int wimax_dev_add(struct wimax_dev *, struct net_device *);
-extern void wimax_dev_rm(struct wimax_dev *);
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
static inline
struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
@@ -456,8 +454,8 @@ struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
return wimax_dev->net_dev->dev.parent;
}
-extern void wimax_state_change(struct wimax_dev *, enum wimax_st);
-extern enum wimax_st wimax_state_get(struct wimax_dev *);
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
/*
* Radio Switch state reporting.
@@ -465,8 +463,8 @@ extern enum wimax_st wimax_state_get(struct wimax_dev *);
* enum wimax_rf_state is declared in linux/wimax.h so the exports
* to user space can use it.
*/
-extern void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
/*
@@ -485,22 +483,21 @@ extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
* Be sure not to modify skb->data in the middle (ie: don't use
* skb_push()/skb_pull()/skb_reserve() on the skb).
*
- * "pipe_name" is any string, than can be interpreted as the name of
- * the pipe or destinatary; the interpretation of it is driver
+ * "pipe_name" is any string, that can be interpreted as the name of
+ * the pipe or recipient; the interpretation of it is driver
* specific, so the recipient can multiplex it as wished. It can be
* NULL, it won't be used - an example is using a "diagnostics" tag to
* send diagnostics information that a device-specific diagnostics
* tool would be interested in.
*/
-extern struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *,
- const void *, size_t, gfp_t);
-extern int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-extern int wimax_msg(struct wimax_dev *, const char *,
- const void *, size_t, gfp_t);
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+ size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
-extern const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-extern const void *wimax_msg_data(struct sk_buff *);
-extern ssize_t wimax_msg_len(struct sk_buff *);
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
/*
@@ -515,11 +512,7 @@ extern ssize_t wimax_msg_len(struct sk_buff *);
* device's control structure and (as such) the 'struct wimax_dev' is
* referenced by the caller.
*/
-extern int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-extern int wimax_reset(struct wimax_dev *);
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
-#else
-/* You might be looking for linux/wimax.h */
-#error This file should not be included from user space.
-#endif /* #ifdef __KERNEL__ */
#endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h
index 85926231c07..10ab0fc6d4f 100644
--- a/include/net/wpan-phy.h
+++ b/include/net/wpan-phy.h
@@ -23,28 +23,52 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/bug.h>
+
+/* According to the IEEE 802.15.4 standard the most significant bits of
+ * the 32-bit channel bitmaps shall be used as an integer value to specify 32
+ * possible channel pages. The lower 27 bits of the channel bit map shall be
+ * used as a bit mask to specify channel numbers within a channel page.
+ */
+#define WPAN_NUM_CHANNELS 27
+#define WPAN_NUM_PAGES 32
struct wpan_phy {
struct mutex pib_lock;
/*
- * This is a PIB acording to 802.15.4-2006.
+ * This is a PIB according to 802.15.4-2011.
* We do not provide timing-related variables, as they
* aren't used outside of driver
*/
u8 current_channel;
u8 current_page;
u32 channels_supported[32];
- u8 transmit_power;
+ s8 transmit_power;
u8 cca_mode;
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+ s32 cca_ed_level;
struct device dev;
int idx;
struct net_device *(*add_iface)(struct wpan_phy *phy,
- const char *name);
+ const char *name, int type);
void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
+ int (*set_txpower)(struct wpan_phy *phy, int db);
+ int (*set_lbt)(struct wpan_phy *phy, bool on);
+ int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode);
+ int (*set_cca_ed_level)(struct wpan_phy *phy, int level);
+ int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be,
+ u8 retries);
+ int (*set_frame_retries)(struct wpan_phy *phy, s8 retries);
+
char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
};
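As a small illustration of the page/channel packing described in the comment above, the standalone helpers below split such a 32-bit word into its channel-page and channel-mask parts; the names are hypothetical and <linux/types.h> is assumed for u32/bool.

/* Illustration only, not part of the header: decode a 32-bit word laid
 * out as "5-bit page | 27-bit channel mask" per the comment above. */
static inline u8 example_wpan_page(u32 word)
{
	return word >> WPAN_NUM_CHANNELS;	/* upper bits select the page */
}

static inline bool example_wpan_channel_ok(u32 word, unsigned int chan)
{
	if (chan >= WPAN_NUM_CHANNELS)
		return false;
	return word & (1U << chan);		/* lower 27 bits are a mask */
}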
diff --git a/include/net/x25.h b/include/net/x25.h
index a06119a0512..c383aa4edbf 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -187,57 +187,57 @@ extern int sysctl_x25_clear_request_timeout;
extern int sysctl_x25_ack_holdback_timeout;
extern int sysctl_x25_forward;
-extern int x25_parse_address_block(struct sk_buff *skb,
- struct x25_address *called_addr,
- struct x25_address *calling_addr);
-
-extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
- struct x25_address *);
-extern int x25_addr_aton(unsigned char *, struct x25_address *,
- struct x25_address *);
-extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
-extern void x25_destroy_socket_from_timer(struct sock *);
-extern int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
-extern void x25_kill_by_neigh(struct x25_neigh *);
+int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr);
+
+int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *);
+int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *);
+struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+void x25_destroy_socket_from_timer(struct sock *);
+int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+void x25_kill_by_neigh(struct x25_neigh *);
/* x25_dev.c */
-extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
-extern int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-extern void x25_establish_link(struct x25_neigh *);
-extern void x25_terminate_link(struct x25_neigh *);
+void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+void x25_establish_link(struct x25_neigh *);
+void x25_terminate_link(struct x25_neigh *);
/* x25_facilities.c */
-extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
- struct x25_dte_facilities *, unsigned long *);
-extern int x25_create_facilities(unsigned char *, struct x25_facilities *,
- struct x25_dte_facilities *, unsigned long);
-extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
- struct x25_facilities *,
- struct x25_dte_facilities *);
-extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
+ struct x25_dte_facilities *, unsigned long *);
+int x25_create_facilities(unsigned char *, struct x25_facilities *,
+ struct x25_dte_facilities *, unsigned long);
+int x25_negotiate_facilities(struct sk_buff *, struct sock *,
+ struct x25_facilities *,
+ struct x25_dte_facilities *);
+void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
/* x25_forward.c */
-extern void x25_clear_forward_by_lci(unsigned int lci);
-extern void x25_clear_forward_by_dev(struct net_device *);
-extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
-extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
- struct sk_buff *, int);
+void x25_clear_forward_by_lci(unsigned int lci);
+void x25_clear_forward_by_dev(struct net_device *);
+int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *,
+ int);
/* x25_in.c */
-extern int x25_process_rx_frame(struct sock *, struct sk_buff *);
-extern int x25_backlog_rcv(struct sock *, struct sk_buff *);
+int x25_process_rx_frame(struct sock *, struct sk_buff *);
+int x25_backlog_rcv(struct sock *, struct sk_buff *);
/* x25_link.c */
-extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
-extern void x25_link_device_up(struct net_device *);
-extern void x25_link_device_down(struct net_device *);
-extern void x25_link_established(struct x25_neigh *);
-extern void x25_link_terminated(struct x25_neigh *);
-extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char);
-extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
-extern int x25_subscr_ioctl(unsigned int, void __user *);
-extern struct x25_neigh *x25_get_neigh(struct net_device *);
-extern void x25_link_free(void);
+void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+void x25_link_device_up(struct net_device *);
+void x25_link_device_down(struct net_device *);
+void x25_link_established(struct x25_neigh *);
+void x25_link_terminated(struct x25_neigh *);
+void x25_transmit_clear_request(struct x25_neigh *, unsigned int,
+ unsigned char);
+void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+int x25_subscr_ioctl(unsigned int, void __user *);
+struct x25_neigh *x25_get_neigh(struct net_device *);
+void x25_link_free(void);
/* x25_neigh.c */
static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
@@ -252,16 +252,16 @@ static __inline__ void x25_neigh_put(struct x25_neigh *nb)
}
/* x25_out.c */
-extern int x25_output(struct sock *, struct sk_buff *);
-extern void x25_kick(struct sock *);
-extern void x25_enquiry_response(struct sock *);
+int x25_output(struct sock *, struct sk_buff *);
+void x25_kick(struct sock *);
+void x25_enquiry_response(struct sock *);
/* x25_route.c */
-extern struct x25_route *x25_get_route(struct x25_address *addr);
-extern struct net_device *x25_dev_get(char *);
-extern void x25_route_device_down(struct net_device *dev);
-extern int x25_route_ioctl(unsigned int, void __user *);
-extern void x25_route_free(void);
+struct x25_route *x25_get_route(struct x25_address *addr);
+struct net_device *x25_dev_get(char *);
+void x25_route_device_down(struct net_device *dev);
+int x25_route_ioctl(unsigned int, void __user *);
+void x25_route_free(void);
static __inline__ void x25_route_hold(struct x25_route *rt)
{
@@ -275,37 +275,38 @@ static __inline__ void x25_route_put(struct x25_route *rt)
}
/* x25_subr.c */
-extern void x25_clear_queues(struct sock *);
-extern void x25_frames_acked(struct sock *, unsigned short);
-extern void x25_requeue_frames(struct sock *);
-extern int x25_validate_nr(struct sock *, unsigned short);
-extern void x25_write_internal(struct sock *, int);
-extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
-extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+void x25_clear_queues(struct sock *);
+void x25_frames_acked(struct sock *, unsigned short);
+void x25_requeue_frames(struct sock *);
+int x25_validate_nr(struct sock *, unsigned short);
+void x25_write_internal(struct sock *, int);
+int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *,
+ int *);
+void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
/* x25_timer.c */
-extern void x25_init_timers(struct sock *sk);
-extern void x25_start_heartbeat(struct sock *);
-extern void x25_start_t2timer(struct sock *);
-extern void x25_start_t21timer(struct sock *);
-extern void x25_start_t22timer(struct sock *);
-extern void x25_start_t23timer(struct sock *);
-extern void x25_stop_heartbeat(struct sock *);
-extern void x25_stop_timer(struct sock *);
-extern unsigned long x25_display_timer(struct sock *);
-extern void x25_check_rbuf(struct sock *);
+void x25_init_timers(struct sock *sk);
+void x25_start_heartbeat(struct sock *);
+void x25_start_t2timer(struct sock *);
+void x25_start_t21timer(struct sock *);
+void x25_start_t22timer(struct sock *);
+void x25_start_t23timer(struct sock *);
+void x25_stop_heartbeat(struct sock *);
+void x25_stop_timer(struct sock *);
+unsigned long x25_display_timer(struct sock *);
+void x25_check_rbuf(struct sock *);
/* sysctl_net_x25.c */
#ifdef CONFIG_SYSCTL
-extern void x25_register_sysctl(void);
-extern void x25_unregister_sysctl(void);
+void x25_register_sysctl(void);
+void x25_unregister_sysctl(void);
#else
static inline void x25_register_sysctl(void) {};
static inline void x25_unregister_sysctl(void) {};
#endif /* CONFIG_SYSCTL */
struct x25_skb_cb {
- unsigned flags;
+ unsigned int flags;
};
#define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
@@ -318,6 +319,6 @@ extern rwlock_t x25_forward_list_lock;
extern struct list_head x25_neigh_list;
extern rwlock_t x25_neigh_list_lock;
-extern int x25_proc_init(void);
-extern void x25_proc_exit(void);
+int x25_proc_init(void);
+void x25_proc_exit(void);
#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7fa5b005893..721e9c3b11b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -36,6 +36,7 @@
#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
+#define XFRM_ALIGN4(len) (((len) + 3) & ~3)
#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
@@ -52,7 +53,6 @@
#define XFRM_INC_STATS_USER(net, field) ((void)(net))
#endif
-extern struct mutex xfrm_cfg_mutex;
/* Organization of SPD aka "XFRM rules"
------------------------------------
@@ -118,11 +118,10 @@ extern struct mutex xfrm_cfg_mutex;
struct xfrm_state_walk {
struct list_head all;
u8 state;
- union {
- u8 dying;
- u8 proto;
- };
+ u8 dying;
+ u8 proto;
u32 seq;
+ struct xfrm_address_filter *filter;
};
/* Full description of state of transformer. */
@@ -143,6 +142,7 @@ struct xfrm_state {
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
+ u32 tfcpad;
u32 genid;
@@ -160,6 +160,7 @@ struct xfrm_state {
xfrm_address_t saddr;
int header_len;
int trailer_len;
+ u32 extra_flags;
} props;
struct xfrm_lifetime_cfg lft;
@@ -184,9 +185,14 @@ struct xfrm_state {
/* State for replay detection */
struct xfrm_replay_state replay;
+ struct xfrm_replay_state_esn *replay_esn;
/* Replay detection state at the time we sent the last notification */
struct xfrm_replay_state preplay;
+ struct xfrm_replay_state_esn *preplay_esn;
+
+ /* The functions for replay detection. */
+ struct xfrm_replay *repl;
/* internal flag that only holds state for delayed aevent at the
* moment
@@ -206,6 +212,9 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct tasklet_hrtimer mtimer;
+ /* used to fix curlft->add_time when changing date */
+ long saved_tmo;
+
/* Last used time */
unsigned long lastused;
@@ -231,6 +240,7 @@ static inline struct net *xs_net(struct xfrm_state *x)
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER 1
+#define XFRM_SOFT_EXPIRE 2
enum {
XFRM_STATE_VOID,
@@ -252,11 +262,23 @@ struct km_event {
} data;
u32 seq;
- u32 pid;
+ u32 portid;
u32 event;
struct net *net;
};
+struct xfrm_replay {
+ void (*advance)(struct xfrm_state *x, __be32 net_seq);
+ int (*check)(struct xfrm_state *x,
+ struct sk_buff *skb,
+ __be32 net_seq);
+ int (*recheck)(struct xfrm_state *x,
+ struct sk_buff *skb,
+ __be32 net_seq);
+ void (*notify)(struct xfrm_state *x, int event);
+ int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+};
+
struct net_device;
struct xfrm_type;
struct xfrm_dst;
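The struct xfrm_replay added above groups the per-state replay-protection callbacks. As background, the sketch below shows the classic sliding-window test such a ->check() implementation performs; it is a standalone illustration with a window of at most 32 sequence numbers, not the kernel's actual code.

/* Standalone illustration of window-based anti-replay checking.
 * 'last' is the highest sequence number accepted so far and 'bitmap'
 * records which of the previous 'window' (<= 32) numbers were seen. */
static bool example_replay_ok(u32 seq, u32 last, u32 bitmap, u32 window)
{
	u32 diff;

	if (seq == 0)
		return false;			/* sequence number 0 is invalid */
	if (seq > last)
		return true;			/* moves the window forward */
	diff = last - seq;
	if (diff >= window)
		return false;			/* too old, fell out of the window */
	return !(bitmap & (1U << diff));	/* reject if already seen */
}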
@@ -265,30 +287,35 @@ struct xfrm_policy_afinfo {
struct dst_ops *dst_ops;
void (*garbage_collect)(struct net *net);
struct dst_entry *(*dst_lookup)(struct net *net, int tos,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr);
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr);
int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
- int (*get_tos)(struct flowi *fl);
+ int (*get_tos)(const struct flowi *fl);
+ void (*init_dst)(struct net *net,
+ struct xfrm_dst *dst);
int (*init_path)(struct xfrm_dst *path,
struct dst_entry *dst,
int nfheader_len);
int (*fill_dst)(struct xfrm_dst *xdst,
struct net_device *dev,
- struct flowi *fl);
+ const struct flowi *fl);
+ struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
};
-extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
+int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+void km_policy_notify(struct xfrm_policy *xp, int dir,
+ const struct km_event *c);
+void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
-extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
-extern int __xfrm_state_delete(struct xfrm_state *x);
+int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
+ struct xfrm_policy *pol);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
+int __xfrm_state_delete(struct xfrm_state *x);
struct xfrm_state_afinfo {
unsigned int family;
@@ -298,24 +325,41 @@ struct xfrm_state_afinfo {
const struct xfrm_type *type_map[IPPROTO_MAX];
struct xfrm_mode *mode_map[XFRM_MODE_MAX];
int (*init_flags)(struct xfrm_state *x);
- void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
- void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr);
+ void (*init_tempsel)(struct xfrm_selector *sel,
+ const struct flowi *fl);
+ void (*init_temprop)(struct xfrm_state *x,
+ const struct xfrm_tmpl *tmpl,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr);
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
- int (*output)(struct sk_buff *skb);
+ int (*output)(struct sock *sk, struct sk_buff *skb);
+ int (*output_finish)(struct sk_buff *skb);
int (*extract_input)(struct xfrm_state *x,
struct sk_buff *skb);
int (*extract_output)(struct xfrm_state *x,
struct sk_buff *skb);
int (*transport_finish)(struct sk_buff *skb,
int async);
+ void (*local_error)(struct sk_buff *skb, u32 mtu);
};
-extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
-extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+struct xfrm_input_afinfo {
+ unsigned int family;
+ struct module *owner;
+ int (*callback)(struct sk_buff *skb, u8 protocol,
+ int err);
+};
+
+int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo);
+int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo);
+
+void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
char *description;
@@ -331,14 +375,15 @@ struct xfrm_type {
void (*destructor)(struct xfrm_state *);
int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
- int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
+ int (*reject)(struct xfrm_state *, struct sk_buff *,
+ const struct flowi *);
int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
/* Estimate maximal size of result of transformation of a dgram */
u32 (*get_mtu)(struct xfrm_state *, int size);
};
-extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
-extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
struct xfrm_mode {
/*
@@ -399,8 +444,8 @@ enum {
XFRM_MODE_FLAG_TUNNEL = 1,
};
-extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
-extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
+int xfrm_register_mode(struct xfrm_mode *mode, int family);
+int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
static inline int xfrm_af2proto(unsigned int family)
{
@@ -470,6 +515,12 @@ struct xfrm_policy_walk {
u32 seq;
};
+struct xfrm_policy_queue {
+ struct sk_buff_head hold_queue;
+ struct timer_list hold_timer;
+ unsigned long timeout;
+};
+
struct xfrm_policy {
#ifdef CONFIG_NET_NS
struct net *xp_net;
@@ -491,6 +542,7 @@ struct xfrm_policy {
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
struct xfrm_policy_walk_entry walk;
+ struct xfrm_policy_queue polq;
u8 type;
u8 action;
u8 flags;
@@ -500,7 +552,7 @@ struct xfrm_policy {
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
};
-static inline struct net *xp_net(struct xfrm_policy *xp)
+static inline struct net *xp_net(const struct xfrm_policy *xp)
{
return read_pnet(&xp->xp_net);
}
@@ -526,10 +578,6 @@ struct xfrm_migrate {
};
#define XFRM_KM_TIMEOUT 30
-/* which seqno */
-#define XFRM_REPLAY_SEQ 1
-#define XFRM_REPLAY_OSEQ 2
-#define XFRM_REPLAY_SEQ_MASK 3
/* what happened */
#define XFRM_REPLAY_UPDATE XFRM_AE_CR
#define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
@@ -544,17 +592,36 @@ struct xfrm_migrate {
struct xfrm_mgr {
struct list_head list;
char *id;
- int (*notify)(struct xfrm_state *x, struct km_event *c);
- int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
+ int (*notify)(struct xfrm_state *x, const struct km_event *c);
+ int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
- int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
+ int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
- int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
+ int (*migrate)(const struct xfrm_selector *sel,
+ u8 dir, u8 type,
+ const struct xfrm_migrate *m,
+ int num_bundles,
+ const struct xfrm_kmaddress *k);
+ bool (*is_alive)(const struct km_event *c);
+};
+
+int xfrm_register_km(struct xfrm_mgr *km);
+int xfrm_unregister_km(struct xfrm_mgr *km);
+
+struct xfrm_tunnel_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+
+ union {
+ struct ip_tunnel *ip4;
+ struct ip6_tnl *ip6;
+ } tunnel;
};
-extern int xfrm_register_km(struct xfrm_mgr *km);
-extern int xfrm_unregister_km(struct xfrm_mgr *km);
+#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
/*
* This structure is used for the duration where packets are being
@@ -562,15 +629,18 @@ extern int xfrm_unregister_km(struct xfrm_mgr *km);
* area beyond the generic IP part may be overwritten.
*/
struct xfrm_skb_cb {
- union {
- struct inet_skb_parm h4;
- struct inet6_skb_parm h6;
- } header;
+ struct xfrm_tunnel_skb_cb header;
/* Sequence number for replay protection. */
union {
- u64 output;
- __be32 input;
+ struct {
+ __u32 low;
+ __u32 hi;
+ } output;
+ struct {
+ __be32 low;
+ __be32 hi;
+ } input;
} seq;
};
@@ -581,10 +651,7 @@ struct xfrm_skb_cb {
* to transmit header information to the mode input/output functions.
*/
struct xfrm_mode_skb_cb {
- union {
- struct inet_skb_parm h4;
- struct inet6_skb_parm h6;
- } header;
+ struct xfrm_tunnel_skb_cb header;
/* Copied from header for IPv4, always set to zero and DF for IPv6. */
__be16 id;
@@ -616,10 +683,7 @@ struct xfrm_mode_skb_cb {
* related information.
*/
struct xfrm_spi_skb_cb {
- union {
- struct inet_skb_parm h4;
- struct inet6_skb_parm h6;
- } header;
+ struct xfrm_tunnel_skb_cb header;
unsigned int daddroff;
unsigned int family;
@@ -627,13 +691,6 @@ struct xfrm_spi_skb_cb {
#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
-/* Audit Information */
-struct xfrm_audit {
- u32 secid;
- uid_t loginuid;
- u32 sessionid;
-};
-
#ifdef CONFIG_AUDITSYSCALL
static inline struct audit_buffer *xfrm_audit_start(const char *op)
{
@@ -649,55 +706,52 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
return audit_buf;
}
-static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
+static inline void xfrm_audit_helper_usrinfo(bool task_valid,
struct audit_buffer *audit_buf)
{
- char *secctx;
- u32 secctx_len;
+ const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
+ audit_get_loginuid(current) :
+ INVALID_UID);
+ const unsigned int ses = task_valid ? audit_get_sessionid(current) :
+ (unsigned int) -1;
audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
- if (secid != 0 &&
- security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
- audit_log_format(audit_buf, " subj=%s", secctx);
- security_release_secctx(secctx, secctx_len);
- } else
- audit_log_task_context(audit_buf);
-}
-
-extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
- u32 auid, u32 ses, u32 secid);
-extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
- u32 auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
- u32 auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
- u32 auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
- struct sk_buff *skb);
-extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
-extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
- __be32 net_spi, __be32 net_seq);
-extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
- struct sk_buff *skb, u8 proto);
+ audit_log_task_context(audit_buf);
+}
+
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
+ bool task_valid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
+void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+ struct sk_buff *skb);
+void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
+ __be32 net_seq);
+void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
+void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
+ __be32 net_seq);
+void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
+ u8 proto);
#else
static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
- u32 auid, u32 ses, u32 secid)
+ bool task_valid)
{
}
static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
- u32 auid, u32 ses, u32 secid)
+ bool task_valid)
{
}
static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
- u32 auid, u32 ses, u32 secid)
+ bool task_valid)
{
}
static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
- u32 auid, u32 ses, u32 secid)
+ bool task_valid)
{
}
@@ -706,6 +760,11 @@ static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
{
}
+static inline void xfrm_audit_state_replay(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+}
+
static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
u16 family)
{
@@ -728,7 +787,7 @@ static inline void xfrm_pol_hold(struct xfrm_policy *policy)
atomic_inc(&policy->refcnt);
}
-extern void xfrm_policy_destroy(struct xfrm_policy *policy);
+void xfrm_policy_destroy(struct xfrm_policy *policy);
static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
@@ -743,7 +802,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
xfrm_pol_put(pols[i]);
}
-extern void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
@@ -761,10 +820,11 @@ static inline void xfrm_state_hold(struct xfrm_state *x)
atomic_inc(&x->refcnt);
}
-static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
+static inline bool addr_match(const void *token1, const void *token2,
+ int prefixlen)
{
- __be32 *a1 = token1;
- __be32 *a2 = token2;
+ const __be32 *a1 = token1;
+ const __be32 *a2 = token2;
int pdw;
int pbi;
@@ -773,7 +833,7 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
if (pdw)
if (memcmp(a1, a2, pdw << 2))
- return 0;
+ return false;
if (pbi) {
__be32 mask;
@@ -781,32 +841,40 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
mask = htonl((0xffffffff) << (32 - pbi));
if ((a1[pdw] ^ a2[pdw]) & mask)
- return 0;
+ return false;
}
- return 1;
+ return true;
+}
+
+static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
+{
+ /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
+ if (prefixlen == 0)
+ return true;
+ return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
}
static __inline__
-__be16 xfrm_flowi_sport(struct flowi *fl)
+__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
{
__be16 port;
- switch(fl->proto) {
+ switch(fl->flowi_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_SCTP:
- port = fl->fl_ip_sport;
+ port = uli->ports.sport;
break;
case IPPROTO_ICMP:
case IPPROTO_ICMPV6:
- port = htons(fl->fl_icmp_type);
+ port = htons(uli->icmpt.type);
break;
case IPPROTO_MH:
- port = htons(fl->fl_mh_type);
+ port = htons(uli->mht.type);
break;
case IPPROTO_GRE:
- port = htons(ntohl(fl->fl_gre_key) >> 16);
+ port = htons(ntohl(uli->gre_key) >> 16);
break;
default:
port = 0; /*XXX*/
@@ -815,22 +883,22 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
}
static __inline__
-__be16 xfrm_flowi_dport(struct flowi *fl)
+__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
{
__be16 port;
- switch(fl->proto) {
+ switch(fl->flowi_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_SCTP:
- port = fl->fl_ip_dport;
+ port = uli->ports.dport;
break;
case IPPROTO_ICMP:
case IPPROTO_ICMPV6:
- port = htons(fl->fl_icmp_code);
+ port = htons(uli->icmpt.code);
break;
case IPPROTO_GRE:
- port = htons(ntohl(fl->fl_gre_key) & 0xffff);
+ port = htons(ntohl(uli->gre_key) & 0xffff);
break;
default:
port = 0; /*XXX*/
@@ -838,14 +906,14 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
return port;
}
-extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
- unsigned short family);
+bool xfrm_selector_match(const struct xfrm_selector *sel,
+ const struct flowi *fl, unsigned short family);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* If neither has a context --> match
* Otherwise, both must have a context and the sids, doi, alg must match
*/
-static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
+static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
return ((!s1 && !s2) ||
(s1 && s2 &&
@@ -854,9 +922,9 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct
(s1->ctx_alg == s2->ctx_alg)));
}
#else
-static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
+static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
- return 1;
+ return true;
}
#endif
@@ -909,7 +977,7 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
}
#endif
-extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct sec_path {
atomic_t refcnt;
@@ -917,6 +985,15 @@ struct sec_path {
struct xfrm_state *xvec[XFRM_MAX_DEPTH];
};
+static inline int secpath_exists(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ return skb->sp != NULL;
+#else
+ return 0;
+#endif
+}
+
static inline struct sec_path *
secpath_get(struct sec_path *sp)
{
@@ -925,7 +1002,7 @@ secpath_get(struct sec_path *sp)
return sp;
}
-extern void __secpath_destroy(struct sec_path *sp);
+void __secpath_destroy(struct sec_path *sp);
static inline void
secpath_put(struct sec_path *sp)
@@ -934,7 +1011,7 @@ secpath_put(struct sec_path *sp)
__secpath_destroy(sp);
}
-extern struct sec_path *secpath_dup(struct sec_path *src);
+struct sec_path *secpath_dup(struct sec_path *src);
static inline void
secpath_reset(struct sk_buff *skb)
@@ -946,7 +1023,7 @@ secpath_reset(struct sk_buff *skb)
}
static inline int
-xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
+xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
{
switch (family) {
case AF_INET:
@@ -958,21 +1035,21 @@ xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
}
static inline int
-__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
return (tmpl->saddr.a4 &&
tmpl->saddr.a4 != x->props.saddr.a4);
}
static inline int
-__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
- ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
+ !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
}
static inline int
-xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
+xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
{
switch (family) {
case AF_INET:
@@ -984,7 +1061,8 @@ xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short
}
#ifdef CONFIG_XFRM
-extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
+int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+ unsigned short family);
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
struct sk_buff *skb,
@@ -1028,8 +1106,8 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
}
-extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
- unsigned int family, int reverse);
+int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+ unsigned int family, int reverse);
static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
unsigned int family)
@@ -1044,7 +1122,7 @@ static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
return __xfrm_decode_session(skb, fl, family, 1);
}
-extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
@@ -1065,7 +1143,7 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
return xfrm_route_forward(skb, AF_INET6);
}
-extern int __xfrm_sk_clone_policy(struct sock *sk);
+int __xfrm_sk_clone_policy(struct sock *sk);
static inline int xfrm_sk_clone_policy(struct sock *sk)
{
@@ -1074,7 +1152,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
return 0;
}
-extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
static inline void xfrm_sk_free_policy(struct sock *sk)
{
@@ -1088,6 +1166,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
}
}
+void xfrm_garbage_collect(struct net *net);
+
#else
static inline void xfrm_sk_free_policy(struct sock *sk) {}
@@ -1122,52 +1202,55 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
{
return 1;
}
+static inline void xfrm_garbage_collect(struct net *net)
+{
+}
#endif
static __inline__
-xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
{
switch (family){
case AF_INET:
- return (xfrm_address_t *)&fl->fl4_dst;
+ return (xfrm_address_t *)&fl->u.ip4.daddr;
case AF_INET6:
- return (xfrm_address_t *)&fl->fl6_dst;
+ return (xfrm_address_t *)&fl->u.ip6.daddr;
}
return NULL;
}
static __inline__
-xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
{
switch (family){
case AF_INET:
- return (xfrm_address_t *)&fl->fl4_src;
+ return (xfrm_address_t *)&fl->u.ip4.saddr;
case AF_INET6:
- return (xfrm_address_t *)&fl->fl6_src;
+ return (xfrm_address_t *)&fl->u.ip6.saddr;
}
return NULL;
}
static __inline__
-void xfrm_flowi_addr_get(struct flowi *fl,
+void xfrm_flowi_addr_get(const struct flowi *fl,
xfrm_address_t *saddr, xfrm_address_t *daddr,
unsigned short family)
{
switch(family) {
case AF_INET:
- memcpy(&saddr->a4, &fl->fl4_src, sizeof(saddr->a4));
- memcpy(&daddr->a4, &fl->fl4_dst, sizeof(daddr->a4));
+ memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
+ memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
break;
case AF_INET6:
- ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->fl6_src);
- ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->fl6_dst);
+ *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
+ *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
break;
}
}
static __inline__ int
-__xfrm4_state_addr_check(struct xfrm_state *x,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
if (daddr->a4 == x->id.daddr.a4 &&
(saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
@@ -1176,11 +1259,11 @@ __xfrm4_state_addr_check(struct xfrm_state *x,
}
static __inline__ int
-__xfrm6_state_addr_check(struct xfrm_state *x,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
- if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
- (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
+ if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
+ (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
ipv6_addr_any((struct in6_addr *)saddr) ||
ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
return 1;
@@ -1188,8 +1271,8 @@ __xfrm6_state_addr_check(struct xfrm_state *x,
}
static __inline__ int
-xfrm_state_addr_check(struct xfrm_state *x,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_state_addr_check(const struct xfrm_state *x,
+ const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
switch (family) {
@@ -1202,23 +1285,23 @@ xfrm_state_addr_check(struct xfrm_state *x,
}
static __inline__ int
-xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
+xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
unsigned short family)
{
switch (family) {
case AF_INET:
return __xfrm4_state_addr_check(x,
- (xfrm_address_t *)&fl->fl4_dst,
- (xfrm_address_t *)&fl->fl4_src);
+ (const xfrm_address_t *)&fl->u.ip4.daddr,
+ (const xfrm_address_t *)&fl->u.ip4.saddr);
case AF_INET6:
return __xfrm6_state_addr_check(x,
- (xfrm_address_t *)&fl->fl6_dst,
- (xfrm_address_t *)&fl->fl6_src);
+ (const xfrm_address_t *)&fl->u.ip6.daddr,
+ (const xfrm_address_t *)&fl->u.ip6.saddr);
}
return 0;
}
-static inline int xfrm_state_kern(struct xfrm_state *x)
+static inline int xfrm_state_kern(const struct xfrm_state *x)
{
return atomic_read(&x->tunnel_users);
}
@@ -1256,6 +1339,7 @@ struct xfrm_algo_desc {
char *name;
char *compat;
u8 available:1;
+ u8 pfkey_supported:1;
union {
struct xfrm_algo_aead_info aead;
struct xfrm_algo_auth_info auth;
@@ -1265,6 +1349,28 @@ struct xfrm_algo_desc {
struct sadb_alg desc;
};
+/* XFRM protocol handlers. */
+struct xfrm4_protocol {
+ int (*handler)(struct sk_buff *skb);
+ int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
+ struct xfrm4_protocol __rcu *next;
+ int priority;
+};
+
+struct xfrm6_protocol {
+ int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
+
+ struct xfrm6_protocol __rcu *next;
+ int priority;
+};
+
/* XFRM tunnel handlers. */
struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
@@ -1282,16 +1388,19 @@ struct xfrm6_tunnel {
int priority;
};
-extern void xfrm_init(void);
-extern void xfrm4_init(int rt_hash_size);
-extern int xfrm_state_init(struct net *net);
-extern void xfrm_state_fini(struct net *net);
-extern void xfrm4_state_init(void);
+void xfrm_init(void);
+void xfrm4_init(void);
+int xfrm_state_init(struct net *net);
+void xfrm_state_fini(struct net *net);
+void xfrm4_state_init(void);
+void xfrm4_protocol_init(void);
#ifdef CONFIG_XFRM
-extern int xfrm6_init(void);
-extern void xfrm6_fini(void);
-extern int xfrm6_state_init(void);
-extern void xfrm6_state_fini(void);
+int xfrm6_init(void);
+void xfrm6_fini(void);
+int xfrm6_state_init(void);
+void xfrm6_state_fini(void);
+int xfrm6_protocol_init(void);
+void xfrm6_protocol_fini(void);
#else
static inline int xfrm6_init(void)
{
@@ -1304,53 +1413,58 @@ static inline void xfrm6_fini(void)
#endif
#ifdef CONFIG_XFRM_STATISTICS
-extern int xfrm_proc_init(struct net *net);
-extern void xfrm_proc_fini(struct net *net);
+int xfrm_proc_init(struct net *net);
+void xfrm_proc_fini(struct net *net);
#endif
-extern int xfrm_sysctl_init(struct net *net);
+int xfrm_sysctl_init(struct net *net);
#ifdef CONFIG_SYSCTL
-extern void xfrm_sysctl_fini(struct net *net);
+void xfrm_sysctl_fini(struct net *net);
#else
static inline void xfrm_sysctl_fini(struct net *net)
{
}
#endif
-extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
-extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
- int (*func)(struct xfrm_state *, int, void*), void *);
-extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
-extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
- struct flowi *fl, struct xfrm_tmpl *tmpl,
- struct xfrm_policy *pol, int *err,
- unsigned short family);
-extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr,
- unsigned short family,
- u8 mode, u8 proto, u32 reqid);
-extern int xfrm_state_check_expire(struct xfrm_state *x);
-extern void xfrm_state_insert(struct xfrm_state *x);
-extern int xfrm_state_add(struct xfrm_state *x);
-extern int xfrm_state_update(struct xfrm_state *x);
-extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
- xfrm_address_t *daddr, __be32 spi,
- u8 proto, unsigned short family);
-extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr,
- u8 proto,
- unsigned short family);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
+ struct xfrm_address_filter *filter);
+int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
+ int (*func)(struct xfrm_state *, int, void*), void *);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
+struct xfrm_state *xfrm_state_alloc(struct net *net);
+struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ const struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ struct xfrm_policy *pol, int *err,
+ unsigned short family);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+ xfrm_address_t *daddr,
+ xfrm_address_t *saddr,
+ unsigned short family,
+ u8 mode, u8 proto, u32 reqid);
+struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
+ unsigned short family);
+int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_insert(struct xfrm_state *x);
+int xfrm_state_add(struct xfrm_state *x);
+int xfrm_state_update(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr, __be32 spi,
+ u8 proto, unsigned short family);
+struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ u8 proto,
+ unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
-extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
- int n, unsigned short family);
-extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
- int n, unsigned short family);
+int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
+ unsigned short family, struct net *net);
+int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+ unsigned short family);
#else
static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
- int n, unsigned short family)
+ int n, unsigned short family, struct net *net)
{
return -ENOSYS;
}
@@ -1379,62 +1493,74 @@ struct xfrmk_spdinfo {
u32 spdhmcnt;
};
-extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
- u32 seq);
-extern int xfrm_state_delete(struct xfrm_state *x);
-extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern int xfrm_replay_check(struct xfrm_state *x,
- struct sk_buff *skb, __be32 seq);
-extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
-extern void xfrm_replay_notify(struct xfrm_state *x, int event);
-extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
-extern int xfrm_init_state(struct xfrm_state *x);
-extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type);
-extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
-extern int xfrm_output_resume(struct sk_buff *skb, int err);
-extern int xfrm_output(struct sk_buff *skb);
-extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_extract_header(struct sk_buff *skb);
-extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type);
-extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm4_rcv(struct sk_buff *skb);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+int xfrm_state_delete(struct xfrm_state *x);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
+int xfrm_init_state(struct xfrm_state *x);
+int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output(struct sk_buff *skb);
+int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+void xfrm_local_error(struct sk_buff *skb, int mtu);
+int xfrm4_extract_header(struct sk_buff *skb);
+int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+int xfrm4_transport_finish(struct sk_buff *skb, int async);
+int xfrm4_rcv(struct sk_buff *skb);
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
- return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
-}
-
-extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_output(struct sk_buff *skb);
-extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm6_extract_header(struct sk_buff *skb);
-extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
-extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm6_rcv(struct sk_buff *skb);
-extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
- xfrm_address_t *saddr, u8 proto);
-extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
-extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
-extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
-extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
-extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_output(struct sk_buff *skb);
-extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
- u8 **prevhdr);
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+ return xfrm_input(skb, nexthdr, spi, 0);
+}
+
+int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_output(struct sock *sk, struct sk_buff *skb);
+int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
+int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
+int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
+int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
+int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_extract_header(struct sk_buff *skb);
+int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv(struct sk_buff *skb);
+int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
+ xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
+int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
+int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
+int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
+int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_output(struct sock *sk, struct sk_buff *skb);
+int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
+ u8 **prevhdr);
#ifdef CONFIG_XFRM
-extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
+int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname,
+ u8 __user *optval, int optlen);
#else
static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
@@ -1451,75 +1577,81 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
-extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
-extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
- int (*func)(struct xfrm_policy *, int, int, void*), void *);
-extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+ int (*func)(struct xfrm_policy *, int, int, void*),
+ void *);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
-int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
+ u32 id, int delete, int *err);
+int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
u32 xfrm_get_acqseq(void);
-extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
-struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
+int verify_spi_info(u8 proto, u32 min, u32 max);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr, int create,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create,
unsigned short family);
-extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
#ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k);
-extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
-extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
- struct xfrm_migrate *m);
-extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k);
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_bundles,
+ const struct xfrm_kmaddress *k);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
+struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+ struct xfrm_migrate *m);
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles,
+ struct xfrm_kmaddress *k, struct net *net);
#endif
-extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
-extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
-
-extern void xfrm_input_init(void);
-extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
-
-extern void xfrm_probe_algs(void);
-extern int xfrm_count_auth_supported(void);
-extern int xfrm_count_enc_supported(void);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
- int probe);
-
-struct hash_desc;
-struct scatterlist;
-typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
- unsigned int);
-
-static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
- int family)
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
+int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
+ xfrm_address_t *addr);
+
+void xfrm_input_init(void);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+
+void xfrm_probe_algs(void);
+int xfrm_count_pfkey_auth_supported(void);
+int xfrm_count_pfkey_enc_supported(void);
+struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
+ int probe);
+
+static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
+ const xfrm_address_t *b)
+{
+ return ipv6_addr_equal((const struct in6_addr *)a,
+ (const struct in6_addr *)b);
+}
+
+static inline bool xfrm_addr_equal(const xfrm_address_t *a,
+ const xfrm_address_t *b,
+ sa_family_t family)
{
switch (family) {
default:
case AF_INET:
- return (__force u32)a->a4 - (__force u32)b->a4;
+ return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
case AF_INET6:
- return ipv6_addr_cmp((struct in6_addr *)a,
- (struct in6_addr *)b);
+ return xfrm6_addr_equal(a, b);
}
}
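Note the semantic change above: the old xfrm_addr_cmp() returned a three-way comparison, whereas xfrm_addr_equal() returns a boolean, so call sites written as !xfrm_addr_cmp(a, b, family) become direct tests. A minimal illustrative caller (placeholder name) looks like this:

/* Illustration only: boolean equality test replacing the old
 * !xfrm_addr_cmp(a, b, family) idiom. */
static bool example_same_addr(const xfrm_address_t *a,
			      const xfrm_address_t *b, sa_family_t family)
{
	return xfrm_addr_equal(a, b, family);
}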
@@ -1541,19 +1673,71 @@ static inline int xfrm_aevent_is_on(struct net *net)
rcu_read_unlock();
return ret;
}
+
+static inline int xfrm_acquire_is_on(struct net *net)
+{
+ struct sock *nlsk;
+ int ret = 0;
+
+ rcu_read_lock();
+ nlsk = rcu_dereference(net->xfrm.nlsk);
+ if (nlsk)
+ ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
+ rcu_read_unlock();
+
+ return ret;
+}
#endif
-static inline int xfrm_alg_len(struct xfrm_algo *alg)
+static inline int aead_len(struct xfrm_algo_aead *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
+static inline int xfrm_alg_len(const struct xfrm_algo *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
-static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
+static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
+static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
+{
+ return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
+}
+
#ifdef CONFIG_XFRM_MIGRATE
+static inline int xfrm_replay_clone(struct xfrm_state *x,
+ struct xfrm_state *orig)
+{
+ x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+ GFP_KERNEL);
+ if (!x->replay_esn)
+ return -ENOMEM;
+
+ x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
+ x->replay_esn->replay_window = orig->replay_esn->replay_window;
+
+ x->preplay_esn = kmemdup(x->replay_esn,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ GFP_KERNEL);
+ if (!x->preplay_esn) {
+ kfree(x->replay_esn);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
+{
+ return kmemdup(orig, aead_len(orig), GFP_KERNEL);
+}
+
+
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
@@ -1596,14 +1780,33 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
return m->v & m->m;
}
-static inline int xfrm_mark_put(struct sk_buff *skb, struct xfrm_mark *m)
+static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
{
- if (m->m | m->v)
- NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
- return 0;
+ int ret = 0;
-nla_put_failure:
- return -1;
+ if (m->m | m->v)
+ ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
+ return ret;
}
+static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
+ unsigned int family)
+{
+ bool tunnel = false;
+
+ switch(family) {
+ case AF_INET:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
+ tunnel = true;
+ break;
+ case AF_INET6:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
+ tunnel = true;
+ break;
+ }
+ if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL))
+ return -EINVAL;
+
+ return 0;
+}
#endif /* _NET_XFRM_H */
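Finally, xfrm_mark_put() above now returns the nla_put() result instead of jumping to a local nla_put_failure label as the removed NLA_PUT() macro did. A hedged sketch of the resulting caller pattern (placeholder function name):

/* Illustration only: propagate the error (typically -EMSGSIZE) from
 * xfrm_mark_put() instead of relying on a goto-based NLA_PUT() macro. */
static int example_copy_mark_to_user(struct sk_buff *skb,
				     const struct xfrm_mark *m)
{
	int err = xfrm_mark_put(skb, m);

	if (err)
		return err;
	/* ... emit further netlink attributes here ... */
	return 0;
}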