Diffstat (limited to 'include/net')
280 files changed, 36440 insertions, 12716 deletions
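The first file in the diff, the new include/net/6lowpan.h, adds small skb pull helpers (lowpan_fetch_skb(), lowpan_fetch_skb_u8(), lowpan_fetch_skb_u16()) whose return value signals a truncated packet. A minimal sketch of how a caller might consume the generic helper when pulling the two IPHC encoding bytes is given here for orientation; the function name example_pull_iphc() and its use are illustrative assumptions, not part of the patch:

#include <net/6lowpan.h>

/* Illustrative only: pull the two IPHC encoding bytes with the helper
 * added in 6lowpan.h. lowpan_fetch_skb() returns true when the skb is
 * too short (pskb_may_pull() fails), so "true" here also means failure.
 */
static bool example_pull_iphc(struct sk_buff *skb, u8 *iphc0, u8 *iphc1)
{
	if (lowpan_fetch_skb(skb, iphc0, sizeof(*iphc0)))
		return true;
	if (lowpan_fetch_skb(skb, iphc1, sizeof(*iphc1)))
		return true;
	return false;
}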
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h new file mode 100644 index 00000000000..79b530fb2c4 --- /dev/null +++ b/include/net/6lowpan.h @@ -0,0 +1,435 @@ +/* + * Copyright 2011, Siemens AG + * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +/* + * Based on patches from Jon Smirl <jonsmirl@gmail.com> + * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* Jon's code is based on 6lowpan implementation for Contiki which is: + * Copyright (c) 2008, Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef __6LOWPAN_H__ +#define __6LOWPAN_H__ + +#include <net/ipv6.h> +#include <net/net_namespace.h> + +#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ +#define UIP_IPH_LEN 40 /* ipv6 fixed header size */ +#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ +#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */ + +/* + * ipv6 address based on mac + * second bit-flip (Universe/Local) is done according RFC2464 + */ +#define is_addr_mac_addr_based(a, m) \ + ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \ + (((a)->s6_addr[9]) == (m)[1]) && \ + (((a)->s6_addr[10]) == (m)[2]) && \ + (((a)->s6_addr[11]) == (m)[3]) && \ + (((a)->s6_addr[12]) == (m)[4]) && \ + (((a)->s6_addr[13]) == (m)[5]) && \ + (((a)->s6_addr[14]) == (m)[6]) && \ + (((a)->s6_addr[15]) == (m)[7])) + +/* ipv6 address is unspecified */ +#define is_addr_unspecified(a) \ + ((((a)->s6_addr32[0]) == 0) && \ + (((a)->s6_addr32[1]) == 0) && \ + (((a)->s6_addr32[2]) == 0) && \ + (((a)->s6_addr32[3]) == 0)) + +/* compare ipv6 addresses prefixes */ +#define ipaddr_prefixcmp(addr1, addr2, length) \ + (memcmp(addr1, addr2, length >> 3) == 0) + +/* local link, i.e. FE80::/10 */ +#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) + +/* + * check whether we can compress the IID to 16 bits, + * it's possible for unicast adresses with first 49 bits are zero only. + */ +#define lowpan_is_iid_16_bit_compressable(a) \ + ((((a)->s6_addr16[4]) == 0) && \ + (((a)->s6_addr[10]) == 0) && \ + (((a)->s6_addr[11]) == 0xff) && \ + (((a)->s6_addr[12]) == 0xfe) && \ + (((a)->s6_addr[13]) == 0)) + +/* multicast address */ +#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF) + +/* check whether the 112-bit gid of the multicast address is mappable to: */ + +/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */ +#define lowpan_is_mcast_addr_compressable(a) \ + ((((a)->s6_addr16[1]) == 0) && \ + (((a)->s6_addr16[2]) == 0) && \ + (((a)->s6_addr16[3]) == 0) && \ + (((a)->s6_addr16[4]) == 0) && \ + (((a)->s6_addr16[5]) == 0) && \ + (((a)->s6_addr16[6]) == 0) && \ + (((a)->s6_addr[14]) == 0) && \ + ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2))) + +/* 48 bits, FFXX::00XX:XXXX:XXXX */ +#define lowpan_is_mcast_addr_compressable48(a) \ + ((((a)->s6_addr16[1]) == 0) && \ + (((a)->s6_addr16[2]) == 0) && \ + (((a)->s6_addr16[3]) == 0) && \ + (((a)->s6_addr16[4]) == 0) && \ + (((a)->s6_addr[10]) == 0)) + +/* 32 bits, FFXX::00XX:XXXX */ +#define lowpan_is_mcast_addr_compressable32(a) \ + ((((a)->s6_addr16[1]) == 0) && \ + (((a)->s6_addr16[2]) == 0) && \ + (((a)->s6_addr16[3]) == 0) && \ + (((a)->s6_addr16[4]) == 0) && \ + (((a)->s6_addr16[5]) == 0) && \ + (((a)->s6_addr[12]) == 0)) + +/* 8 bits, FF02::00XX */ +#define lowpan_is_mcast_addr_compressable8(a) \ + ((((a)->s6_addr[1]) == 2) && \ + (((a)->s6_addr16[1]) == 0) && \ + (((a)->s6_addr16[2]) == 0) && \ + (((a)->s6_addr16[3]) == 0) && \ + (((a)->s6_addr16[4]) == 0) && \ + (((a)->s6_addr16[5]) == 0) && \ + (((a)->s6_addr16[6]) == 0) && \ + (((a)->s6_addr[14]) == 0)) + +#define lowpan_is_addr_broadcast(a) \ + ((((a)[0]) == 0xFF) && \ + (((a)[1]) == 0xFF) && \ + (((a)[2]) == 0xFF) && \ + (((a)[3]) == 0xFF) && \ + (((a)[4]) == 0xFF) && \ + (((a)[5]) == 0xFF) && \ + (((a)[6]) == 0xFF) && \ + (((a)[7]) == 0xFF)) + +#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */ +#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */ +#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... 
*/ +#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */ +#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */ + +#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */ + +#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */ + +#define LOWPAN_FRAG1_HEAD_SIZE 0x4 +#define LOWPAN_FRAGN_HEAD_SIZE 0x5 + +/* + * According IEEE802.15.4 standard: + * - MTU is 127 octets + * - maximum MHR size is 37 octets + * - MFR size is 2 octets + * + * so minimal payload size that we may guarantee is: + * MTU - MHR - MFR = 88 octets + */ +#define LOWPAN_FRAG_SIZE 88 + +/* + * Values of fields within the IPHC encoding first byte + * (C stands for compressed and I for inline) + */ +#define LOWPAN_IPHC_TF 0x18 + +#define LOWPAN_IPHC_FL_C 0x10 +#define LOWPAN_IPHC_TC_C 0x08 +#define LOWPAN_IPHC_NH_C 0x04 +#define LOWPAN_IPHC_TTL_1 0x01 +#define LOWPAN_IPHC_TTL_64 0x02 +#define LOWPAN_IPHC_TTL_255 0x03 +#define LOWPAN_IPHC_TTL_I 0x00 + + +/* Values of fields within the IPHC encoding second byte */ +#define LOWPAN_IPHC_CID 0x80 + +#define LOWPAN_IPHC_ADDR_00 0x00 +#define LOWPAN_IPHC_ADDR_01 0x01 +#define LOWPAN_IPHC_ADDR_02 0x02 +#define LOWPAN_IPHC_ADDR_03 0x03 + +#define LOWPAN_IPHC_SAC 0x40 +#define LOWPAN_IPHC_SAM 0x30 + +#define LOWPAN_IPHC_SAM_BIT 4 + +#define LOWPAN_IPHC_M 0x08 +#define LOWPAN_IPHC_DAC 0x04 +#define LOWPAN_IPHC_DAM_00 0x00 +#define LOWPAN_IPHC_DAM_01 0x01 +#define LOWPAN_IPHC_DAM_10 0x02 +#define LOWPAN_IPHC_DAM_11 0x03 + +#define LOWPAN_IPHC_DAM_BIT 0 +/* + * LOWPAN_UDP encoding (works together with IPHC) + */ +#define LOWPAN_NHC_UDP_MASK 0xF8 +#define LOWPAN_NHC_UDP_ID 0xF0 +#define LOWPAN_NHC_UDP_CHECKSUMC 0x04 +#define LOWPAN_NHC_UDP_CHECKSUMI 0x00 + +#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0 +#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0 +#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000 +#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00 + +/* values for port compression, _with checksum_ ie bit 5 set to 0 */ +#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */ +#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline, + dest = 0xF0 + 8 bit inline */ +#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline, + dest = 16 bit inline */ +#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */ +#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */ + +#ifdef DEBUG +/* print data in line */ +static inline void raw_dump_inline(const char *caller, char *msg, + unsigned char *buf, int len) +{ + if (msg) + pr_debug("%s():%s: ", caller, msg); + + print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false); +} + +/* print data in a table format: + * + * addr: xx xx xx xx xx xx + * addr: xx xx xx xx xx xx + * ... 
+ */ +static inline void raw_dump_table(const char *caller, char *msg, + unsigned char *buf, int len) +{ + if (msg) + pr_debug("%s():%s:\n", caller, msg); + + print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); +} +#else +static inline void raw_dump_table(const char *caller, char *msg, + unsigned char *buf, int len) { } +static inline void raw_dump_inline(const char *caller, char *msg, + unsigned char *buf, int len) { } +#endif + +static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val) +{ + if (unlikely(!pskb_may_pull(skb, 1))) + return -EINVAL; + + *val = skb->data[0]; + skb_pull(skb, 1); + + return 0; +} + +static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val) +{ + if (unlikely(!pskb_may_pull(skb, 2))) + return -EINVAL; + + *val = (skb->data[0] << 8) | skb->data[1]; + skb_pull(skb, 2); + + return 0; +} + +static inline bool lowpan_fetch_skb(struct sk_buff *skb, + void *data, const unsigned int len) +{ + if (unlikely(!pskb_may_pull(skb, len))) + return true; + + skb_copy_from_linear_data(skb, data, len); + skb_pull(skb, len); + + return false; +} + +static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data, + const size_t len) +{ + memcpy(*hc_ptr, data, len); + *hc_ptr += len; +} + +static inline u8 lowpan_addr_mode_size(const u8 addr_mode) +{ + static const u8 addr_sizes[] = { + [LOWPAN_IPHC_ADDR_00] = 16, + [LOWPAN_IPHC_ADDR_01] = 8, + [LOWPAN_IPHC_ADDR_02] = 2, + [LOWPAN_IPHC_ADDR_03] = 0, + }; + return addr_sizes[addr_mode]; +} + +static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header) +{ + u8 ret = 1; + + if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { + *uncomp_header += sizeof(struct udphdr); + + switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) { + case LOWPAN_NHC_UDP_CS_P_00: + ret += 4; + break; + case LOWPAN_NHC_UDP_CS_P_01: + case LOWPAN_NHC_UDP_CS_P_10: + ret += 3; + break; + case LOWPAN_NHC_UDP_CS_P_11: + ret++; + break; + default: + break; + } + + if (!(h_enc & LOWPAN_NHC_UDP_CS_C)) + ret += 2; + } + + return ret; +} + +/** + * lowpan_uncompress_size - returns skb->len size with uncompressed header + * @skb: sk_buff with 6lowpan header inside + * @datagram_offset: optional to get the datagram_offset value + * + * Returns the skb->len with uncompressed header + */ +static inline u16 +lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) +{ + u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr); + u8 iphc0, iphc1, h_enc; + + iphc0 = skb_network_header(skb)[0]; + iphc1 = skb_network_header(skb)[1]; + + switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) { + case 0: + ret += 4; + break; + case 1: + ret += 3; + break; + case 2: + ret++; + break; + default: + break; + } + + if (!(iphc0 & LOWPAN_IPHC_NH_C)) + ret++; + + if (!(iphc0 & 0x03)) + ret++; + + ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >> + LOWPAN_IPHC_SAM_BIT); + + if (iphc1 & LOWPAN_IPHC_M) { + switch ((iphc1 & LOWPAN_IPHC_DAM_11) >> + LOWPAN_IPHC_DAM_BIT) { + case LOWPAN_IPHC_DAM_00: + ret += 16; + break; + case LOWPAN_IPHC_DAM_01: + ret += 6; + break; + case LOWPAN_IPHC_DAM_10: + ret += 4; + break; + case LOWPAN_IPHC_DAM_11: + ret++; + break; + default: + break; + } + } else { + ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >> + LOWPAN_IPHC_DAM_BIT); + } + + if (iphc0 & LOWPAN_IPHC_NH_C) { + h_enc = skb_network_header(skb)[ret]; + ret += lowpan_next_hdr_size(h_enc, &uncomp_header); + } + + if (dgram_offset) + *dgram_offset = uncomp_header; + + return skb->len + uncomp_header - ret; +} + +typedef int 
(*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev); + +int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, + const u8 *saddr, const u8 saddr_type, const u8 saddr_len, + const u8 *daddr, const u8 daddr_type, const u8 daddr_len, + u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver); +int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *_daddr, + const void *_saddr, unsigned int len); + +#endif /* __6LOWPAN_H__ */ diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 585eb449699..27dfe85772b 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -27,44 +27,147 @@ #ifndef NET_9P_H #define NET_9P_H -#ifdef CONFIG_NET_9P_DEBUG +/** + * enum p9_debug_flags - bits for mount time debug parameter + * @P9_DEBUG_ERROR: more verbose error messages including original error string + * @P9_DEBUG_9P: 9P protocol tracing + * @P9_DEBUG_VFS: VFS API tracing + * @P9_DEBUG_CONV: protocol conversion tracing + * @P9_DEBUG_MUX: trace management of concurrent transactions + * @P9_DEBUG_TRANS: transport tracing + * @P9_DEBUG_SLABS: memory management tracing + * @P9_DEBUG_FCALL: verbose dump of protocol messages + * @P9_DEBUG_FID: fid allocation/deallocation tracking + * @P9_DEBUG_PKT: packet marshalling/unmarshalling + * @P9_DEBUG_FSC: FS-cache tracing + * @P9_DEBUG_VPKT: Verbose packet debugging (full packet dump) + * + * These flags are passed at mount time to turn on various levels of + * verbosity and tracing which will be output to the system logs. + */ -#define P9_DEBUG_ERROR (1<<0) -#define P9_DEBUG_9P (1<<2) -#define P9_DEBUG_VFS (1<<3) -#define P9_DEBUG_CONV (1<<4) -#define P9_DEBUG_MUX (1<<5) -#define P9_DEBUG_TRANS (1<<6) -#define P9_DEBUG_SLABS (1<<7) -#define P9_DEBUG_FCALL (1<<8) +enum p9_debug_flags { + P9_DEBUG_ERROR = (1<<0), + P9_DEBUG_9P = (1<<2), + P9_DEBUG_VFS = (1<<3), + P9_DEBUG_CONV = (1<<4), + P9_DEBUG_MUX = (1<<5), + P9_DEBUG_TRANS = (1<<6), + P9_DEBUG_SLABS = (1<<7), + P9_DEBUG_FCALL = (1<<8), + P9_DEBUG_FID = (1<<9), + P9_DEBUG_PKT = (1<<10), + P9_DEBUG_FSC = (1<<11), + P9_DEBUG_VPKT = (1<<12), +}; +#ifdef CONFIG_NET_9P_DEBUG extern unsigned int p9_debug_level; - -#define P9_DPRINTK(level, format, arg...) \ -do { \ - if ((p9_debug_level & level) == level) \ - printk(KERN_NOTICE "-- %s (%d): " \ - format , __FUNCTION__, task_pid_nr(current) , ## arg); \ -} while (0) - -#define PRINT_FCALL_ERROR(s, fcall) P9_DPRINTK(P9_DEBUG_ERROR, \ - "%s: %.*s\n", s, fcall?fcall->params.rerror.error.len:0, \ - fcall?fcall->params.rerror.error.str:""); - +__printf(3, 4) +void _p9_debug(enum p9_debug_flags level, const char *func, + const char *fmt, ...); +#define p9_debug(level, fmt, ...) \ + _p9_debug(level, __func__, fmt, ##__VA_ARGS__) #else -#define P9_DPRINTK(level, format, arg...) do { } while (0) -#define PRINT_FCALL_ERROR(s, fcall) do { } while (0) +#define p9_debug(level, fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) #endif -#define P9_EPRINTK(level, format, arg...) 
\ -do { \ - printk(level "9p: %s (%d): " \ - format , __FUNCTION__, task_pid_nr(current), ## arg); \ -} while (0) - +/** + * enum p9_msg_t - 9P message types + * @P9_TLERROR: not used + * @P9_RLERROR: response for any failed request for 9P2000.L + * @P9_TSTATFS: file system status request + * @P9_RSTATFS: file system status response + * @P9_TSYMLINK: make symlink request + * @P9_RSYMLINK: make symlink response + * @P9_TMKNOD: create a special file object request + * @P9_RMKNOD: create a special file object response + * @P9_TLCREATE: prepare a handle for I/O on an new file for 9P2000.L + * @P9_RLCREATE: response with file access information for 9P2000.L + * @P9_TRENAME: rename request + * @P9_RRENAME: rename response + * @P9_TMKDIR: create a directory request + * @P9_RMKDIR: create a directory response + * @P9_TVERSION: version handshake request + * @P9_RVERSION: version handshake response + * @P9_TAUTH: request to establish authentication channel + * @P9_RAUTH: response with authentication information + * @P9_TATTACH: establish user access to file service + * @P9_RATTACH: response with top level handle to file hierarchy + * @P9_TERROR: not used + * @P9_RERROR: response for any failed request + * @P9_TFLUSH: request to abort a previous request + * @P9_RFLUSH: response when previous request has been cancelled + * @P9_TWALK: descend a directory hierarchy + * @P9_RWALK: response with new handle for position within hierarchy + * @P9_TOPEN: prepare a handle for I/O on an existing file + * @P9_ROPEN: response with file access information + * @P9_TCREATE: prepare a handle for I/O on a new file + * @P9_RCREATE: response with file access information + * @P9_TREAD: request to transfer data from a file or directory + * @P9_RREAD: response with data requested + * @P9_TWRITE: reuqest to transfer data to a file + * @P9_RWRITE: response with out much data was transferred to file + * @P9_TCLUNK: forget about a handle to an entity within the file system + * @P9_RCLUNK: response when server has forgotten about the handle + * @P9_TREMOVE: request to remove an entity from the hierarchy + * @P9_RREMOVE: response when server has removed the entity + * @P9_TSTAT: request file entity attributes + * @P9_RSTAT: response with file entity attributes + * @P9_TWSTAT: request to update file entity attributes + * @P9_RWSTAT: response when file entity attributes are updated + * + * There are 14 basic operations in 9P2000, paired as + * requests and responses. The one special case is ERROR + * as there is no @P9_TERROR request for clients to transmit to + * the server, but the server may respond to any other request + * with an @P9_RERROR. 
+ * + * See Also: http://plan9.bell-labs.com/sys/man/5/INDEX.html + */ -/* Message Types */ -enum { +enum p9_msg_t { + P9_TLERROR = 6, + P9_RLERROR, + P9_TSTATFS = 8, + P9_RSTATFS, + P9_TLOPEN = 12, + P9_RLOPEN, + P9_TLCREATE = 14, + P9_RLCREATE, + P9_TSYMLINK = 16, + P9_RSYMLINK, + P9_TMKNOD = 18, + P9_RMKNOD, + P9_TRENAME = 20, + P9_RRENAME, + P9_TREADLINK = 22, + P9_RREADLINK, + P9_TGETATTR = 24, + P9_RGETATTR, + P9_TSETATTR = 26, + P9_RSETATTR, + P9_TXATTRWALK = 30, + P9_RXATTRWALK, + P9_TXATTRCREATE = 32, + P9_RXATTRCREATE, + P9_TREADDIR = 40, + P9_RREADDIR, + P9_TFSYNC = 50, + P9_RFSYNC, + P9_TLOCK = 52, + P9_RLOCK, + P9_TGETLOCK = 54, + P9_RGETLOCK, + P9_TLINK = 70, + P9_RLINK, + P9_TMKDIR = 72, + P9_RMKDIR, + P9_TRENAMEAT = 74, + P9_RRENAMEAT, + P9_TUNLINKAT = 76, + P9_RUNLINKAT, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, @@ -95,30 +198,71 @@ enum { P9_RWSTAT, }; -/* open modes */ -enum { +/** + * enum p9_open_mode_t - 9P open modes + * @P9_OREAD: open file for reading only + * @P9_OWRITE: open file for writing only + * @P9_ORDWR: open file for reading or writing + * @P9_OEXEC: open file for execution + * @P9_OTRUNC: truncate file to zero-length before opening it + * @P9_OREXEC: close the file when an exec(2) system call is made + * @P9_ORCLOSE: remove the file when the file is closed + * @P9_OAPPEND: open the file and seek to the end + * @P9_OEXCL: only create a file, do not open it + * + * 9P open modes differ slightly from Posix standard modes. + * In particular, there are extra modes which specify different + * semantic behaviors than may be available on standard Posix + * systems. For example, @P9_OREXEC and @P9_ORCLOSE are modes that + * most likely will not be issued from the Linux VFS client, but may + * be supported by servers. + * + * See Also: http://plan9.bell-labs.com/magic/man2html/2/open + */ + +enum p9_open_mode_t { P9_OREAD = 0x00, P9_OWRITE = 0x01, P9_ORDWR = 0x02, P9_OEXEC = 0x03, - P9_OEXCL = 0x04, P9_OTRUNC = 0x10, P9_OREXEC = 0x20, P9_ORCLOSE = 0x40, P9_OAPPEND = 0x80, -}; - -/* permissions */ -enum { + P9_OEXCL = 0x1000, +}; + +/** + * enum p9_perm_t - 9P permissions + * @P9_DMDIR: mode bit for directories + * @P9_DMAPPEND: mode bit for is append-only + * @P9_DMEXCL: mode bit for excluse use (only one open handle allowed) + * @P9_DMMOUNT: mode bit for mount points + * @P9_DMAUTH: mode bit for authentication file + * @P9_DMTMP: mode bit for non-backed-up files + * @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u) + * @P9_DMLINK: mode bit for hard-link (9P2000.u) + * @P9_DMDEVICE: mode bit for device files (9P2000.u) + * @P9_DMNAMEDPIPE: mode bit for named pipe (9P2000.u) + * @P9_DMSOCKET: mode bit for socket (9P2000.u) + * @P9_DMSETUID: mode bit for setuid (9P2000.u) + * @P9_DMSETGID: mode bit for setgid (9P2000.u) + * @P9_DMSETVTX: mode bit for sticky bit (9P2000.u) + * + * 9P permissions differ slightly from Posix standard modes. 
+ * + * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat + */ +enum p9_perm_t { P9_DMDIR = 0x80000000, P9_DMAPPEND = 0x40000000, P9_DMEXCL = 0x20000000, P9_DMMOUNT = 0x10000000, P9_DMAUTH = 0x08000000, P9_DMTMP = 0x04000000, +/* 9P2000.u extensions */ P9_DMSYMLINK = 0x02000000, P9_DMLINK = 0x01000000, - /* 9P2000.u extensions */ P9_DMDEVICE = 0x00800000, P9_DMNAMEDPIPE = 0x00200000, P9_DMSOCKET = 0x00100000, @@ -127,8 +271,55 @@ enum { P9_DMSETVTX = 0x00010000, }; -/* qid.types */ -enum { +/* 9p2000.L open flags */ +#define P9_DOTL_RDONLY 00000000 +#define P9_DOTL_WRONLY 00000001 +#define P9_DOTL_RDWR 00000002 +#define P9_DOTL_NOACCESS 00000003 +#define P9_DOTL_CREATE 00000100 +#define P9_DOTL_EXCL 00000200 +#define P9_DOTL_NOCTTY 00000400 +#define P9_DOTL_TRUNC 00001000 +#define P9_DOTL_APPEND 00002000 +#define P9_DOTL_NONBLOCK 00004000 +#define P9_DOTL_DSYNC 00010000 +#define P9_DOTL_FASYNC 00020000 +#define P9_DOTL_DIRECT 00040000 +#define P9_DOTL_LARGEFILE 00100000 +#define P9_DOTL_DIRECTORY 00200000 +#define P9_DOTL_NOFOLLOW 00400000 +#define P9_DOTL_NOATIME 01000000 +#define P9_DOTL_CLOEXEC 02000000 +#define P9_DOTL_SYNC 04000000 + +/* 9p2000.L at flags */ +#define P9_DOTL_AT_REMOVEDIR 0x200 + +/* 9p2000.L lock type */ +#define P9_LOCK_TYPE_RDLCK 0 +#define P9_LOCK_TYPE_WRLCK 1 +#define P9_LOCK_TYPE_UNLCK 2 + +/** + * enum p9_qid_t - QID types + * @P9_QTDIR: directory + * @P9_QTAPPEND: append-only + * @P9_QTEXCL: excluse use (only one open handle allowed) + * @P9_QTMOUNT: mount points + * @P9_QTAUTH: authentication file + * @P9_QTTMP: non-backed-up files + * @P9_QTSYMLINK: symbolic links (9P2000.u) + * @P9_QTLINK: hard-link (9P2000.u) + * @P9_QTFILE: normal files + * + * QID types are a subset of permissions - they are primarily + * used to differentiate semantics for a file system entity via + * a jump-table. Their value is also the most significant 16 bits + * of the permission_t + * + * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat + */ +enum p9_qid_t { P9_QTDIR = 0x80, P9_QTAPPEND = 0x40, P9_QTEXCL = 0x20, @@ -140,6 +331,7 @@ enum { P9_QTFILE = 0x00, }; +/* 9P Magic Numbers */ #define P9_NOTAG (u16)(~0) #define P9_NOFID (u32)(~0) #define P9_MAXWELEM 16 @@ -147,42 +339,60 @@ enum { /* ample room for Twrite/Rread header */ #define P9_IOHDRSZ 24 -struct p9_str { - u16 len; - char *str; -}; +/* Room for readdir header */ +#define P9_READDIRHDRSZ 24 + +/* size of header for zero copy read/write */ +#define P9_ZC_HDR_SZ 4096 + +/** + * struct p9_qid - file system entity information + * @type: 8-bit type &p9_qid_t + * @version: 16-bit monotonically incrementing version number + * @path: 64-bit per-server-unique ID for a file system element + * + * qids are identifiers used by 9P servers to track file system + * entities. The type is used to differentiate semantics for operations + * on the entity (ie. read means something different on a directory than + * on a file). The path provides a server unique index for an entity + * (roughly analogous to an inode number), while the version is updated + * every time a file is modified and can be used to maintain cache + * coherency between clients and serves. + * Servers will often differentiate purely synthetic entities by setting + * their version to 0, signaling that they should never be cached and + * should be accessed synchronously. 
+ * + * See Also://plan9.bell-labs.com/magic/man2html/2/stat + */ -/* qids are the unique ID for a file (like an inode */ struct p9_qid { u8 type; u32 version; u64 path; }; -/* Plan 9 file metadata (stat) structure */ -struct p9_stat { - u16 size; - u16 type; - u32 dev; - struct p9_qid qid; - u32 mode; - u32 atime; - u32 mtime; - u64 length; - struct p9_str name; - struct p9_str uid; - struct p9_str gid; - struct p9_str muid; - struct p9_str extension; /* 9p2000.u extensions */ - u32 n_uid; /* 9p2000.u extensions */ - u32 n_gid; /* 9p2000.u extensions */ - u32 n_muid; /* 9p2000.u extensions */ -}; +/** + * struct p9_wstat - file system metadata information + * @size: length prefix for this stat structure instance + * @type: the type of the server (equivalent to a major number) + * @dev: the sub-type of the server (equivalent to a minor number) + * @qid: unique id from the server of type &p9_qid + * @mode: Plan 9 format permissions of type &p9_perm_t + * @atime: Last access/read time + * @mtime: Last modify/write time + * @length: file length + * @name: last element of path (aka filename) + * @uid: owner name + * @gid: group owner + * @muid: last modifier + * @extension: area used to encode extended UNIX support + * @n_uid: numeric user id of owner (part of 9p2000.u extension) + * @n_gid: numeric group id (part of 9p2000.u extension) + * @n_muid: numeric user id of laster modifier (part of 9p2000.u extension) + * + * See Also: http://plan9.bell-labs.com/magic/man2html/2/stat + */ -/* file metadata (stat) structure used to create Twstat message - The is similar to p9_stat, but the strings don't point to - the same memory block and should be freed separately -*/ struct p9_wstat { u16 size; u16 type; @@ -197,215 +407,162 @@ struct p9_wstat { char *gid; char *muid; char *extension; /* 9p2000.u extensions */ - u32 n_uid; /* 9p2000.u extensions */ - u32 n_gid; /* 9p2000.u extensions */ - u32 n_muid; /* 9p2000.u extensions */ + kuid_t n_uid; /* 9p2000.u extensions */ + kgid_t n_gid; /* 9p2000.u extensions */ + kuid_t n_muid; /* 9p2000.u extensions */ }; -/* Structures for Protocol Operations */ -struct p9_tversion { - u32 msize; - struct p9_str version; -}; - -struct p9_rversion { - u32 msize; - struct p9_str version; -}; - -struct p9_tauth { - u32 afid; - struct p9_str uname; - struct p9_str aname; - u32 n_uname; /* 9P2000.u extensions */ -}; - -struct p9_rauth { +struct p9_stat_dotl { + u64 st_result_mask; struct p9_qid qid; -}; - -struct p9_rerror { - struct p9_str error; - u32 errno; /* 9p2000.u extension */ -}; - -struct p9_tflush { - u16 oldtag; -}; - -struct p9_rflush { -}; - -struct p9_tattach { - u32 fid; - u32 afid; - struct p9_str uname; - struct p9_str aname; - u32 n_uname; /* 9P2000.u extensions */ -}; - -struct p9_rattach { - struct p9_qid qid; -}; - -struct p9_twalk { - u32 fid; - u32 newfid; - u16 nwname; - struct p9_str wnames[16]; -}; - -struct p9_rwalk { - u16 nwqid; - struct p9_qid wqids[16]; -}; - -struct p9_topen { - u32 fid; - u8 mode; -}; - -struct p9_ropen { - struct p9_qid qid; - u32 iounit; -}; - -struct p9_tcreate { - u32 fid; - struct p9_str name; - u32 perm; - u8 mode; - struct p9_str extension; -}; - -struct p9_rcreate { - struct p9_qid qid; - u32 iounit; -}; - -struct p9_tread { - u32 fid; - u64 offset; - u32 count; -}; - -struct p9_rread { - u32 count; - u8 *data; -}; - -struct p9_twrite { - u32 fid; - u64 offset; - u32 count; - u8 *data; -}; - -struct p9_rwrite { - u32 count; -}; - -struct p9_tclunk { - u32 fid; -}; - -struct p9_rclunk { -}; - -struct p9_tremove { 
- u32 fid; -}; - -struct p9_rremove { -}; - -struct p9_tstat { - u32 fid; -}; + u32 st_mode; + kuid_t st_uid; + kgid_t st_gid; + u64 st_nlink; + u64 st_rdev; + u64 st_size; + u64 st_blksize; + u64 st_blocks; + u64 st_atime_sec; + u64 st_atime_nsec; + u64 st_mtime_sec; + u64 st_mtime_nsec; + u64 st_ctime_sec; + u64 st_ctime_nsec; + u64 st_btime_sec; + u64 st_btime_nsec; + u64 st_gen; + u64 st_data_version; +}; + +#define P9_STATS_MODE 0x00000001ULL +#define P9_STATS_NLINK 0x00000002ULL +#define P9_STATS_UID 0x00000004ULL +#define P9_STATS_GID 0x00000008ULL +#define P9_STATS_RDEV 0x00000010ULL +#define P9_STATS_ATIME 0x00000020ULL +#define P9_STATS_MTIME 0x00000040ULL +#define P9_STATS_CTIME 0x00000080ULL +#define P9_STATS_INO 0x00000100ULL +#define P9_STATS_SIZE 0x00000200ULL +#define P9_STATS_BLOCKS 0x00000400ULL + +#define P9_STATS_BTIME 0x00000800ULL +#define P9_STATS_GEN 0x00001000ULL +#define P9_STATS_DATA_VERSION 0x00002000ULL + +#define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ +#define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ + +/** + * struct p9_iattr_dotl - P9 inode attribute for setattr + * @valid: bitfield specifying which fields are valid + * same as in struct iattr + * @mode: File permission bits + * @uid: user id of owner + * @gid: group id + * @size: File size + * @atime_sec: Last access time, seconds + * @atime_nsec: Last access time, nanoseconds + * @mtime_sec: Last modification time, seconds + * @mtime_nsec: Last modification time, nanoseconds + */ -struct p9_rstat { - struct p9_stat stat; -}; +struct p9_iattr_dotl { + u32 valid; + u32 mode; + kuid_t uid; + kgid_t gid; + u64 size; + u64 atime_sec; + u64 atime_nsec; + u64 mtime_sec; + u64 mtime_nsec; +}; + +#define P9_LOCK_SUCCESS 0 +#define P9_LOCK_BLOCKED 1 +#define P9_LOCK_ERROR 2 +#define P9_LOCK_GRACE 3 + +#define P9_LOCK_FLAGS_BLOCK 1 +#define P9_LOCK_FLAGS_RECLAIM 2 + +/* struct p9_flock: POSIX lock structure + * @type - type of lock + * @flags - lock flags + * @start - starting offset of the lock + * @length - number of bytes + * @proc_id - process id which wants to take lock + * @client_id - client id + */ -struct p9_twstat { - u32 fid; - struct p9_stat stat; +struct p9_flock { + u8 type; + u32 flags; + u64 start; + u64 length; + u32 proc_id; + char *client_id; }; -struct p9_rwstat { -}; +/* struct p9_getlock: getlock structure + * @type - type of lock + * @start - starting offset of the lock + * @length - number of bytes + * @proc_id - process id which wants to take lock + * @client_id - client id + */ -/* - * fcall is the primary packet structure - * - */ +struct p9_getlock { + u8 type; + u64 start; + u64 length; + u32 proc_id; + char *client_id; +}; + +struct p9_rstatfs { + u32 type; + u32 bsize; + u64 blocks; + u64 bfree; + u64 bavail; + u64 files; + u64 ffree; + u64 fsid; + u32 namelen; +}; + +/** + * struct p9_fcall - primary packet structure + * @size: prefixed length of the structure + * @id: protocol operating identifier of type &p9_msg_t + * @tag: transaction id of the request + * @offset: used by marshalling routines to track current position in buffer + * @capacity: used by marshalling routines to track total malloc'd capacity + * @sdata: payload + * + * &p9_fcall represents the structure for all 9P RPC + * transactions. Requests are packaged into fcalls, and reponses + * must be extracted from them. 
+ * + * See Also: http://plan9.bell-labs.com/magic/man2html/2/fcall + */ struct p9_fcall { u32 size; u8 id; u16 tag; - void *sdata; - - union { - struct p9_tversion tversion; - struct p9_rversion rversion; - struct p9_tauth tauth; - struct p9_rauth rauth; - struct p9_rerror rerror; - struct p9_tflush tflush; - struct p9_rflush rflush; - struct p9_tattach tattach; - struct p9_rattach rattach; - struct p9_twalk twalk; - struct p9_rwalk rwalk; - struct p9_topen topen; - struct p9_ropen ropen; - struct p9_tcreate tcreate; - struct p9_rcreate rcreate; - struct p9_tread tread; - struct p9_rread rread; - struct p9_twrite twrite; - struct p9_rwrite rwrite; - struct p9_tclunk tclunk; - struct p9_rclunk rclunk; - struct p9_tremove tremove; - struct p9_rremove rremove; - struct p9_tstat tstat; - struct p9_rstat rstat; - struct p9_twstat twstat; - struct p9_rwstat rwstat; - } params; + + size_t offset; + size_t capacity; + + u8 *sdata; }; struct p9_idpool; -int p9_deserialize_stat(void *buf, u32 buflen, struct p9_stat *stat, - int dotu); -int p9_deserialize_fcall(void *buf, u32 buflen, struct p9_fcall *fc, int dotu); -void p9_set_tag(struct p9_fcall *fc, u16 tag); -struct p9_fcall *p9_create_tversion(u32 msize, char *version); -struct p9_fcall *p9_create_tattach(u32 fid, u32 afid, char *uname, - char *aname, u32 n_uname, int dotu); -struct p9_fcall *p9_create_tauth(u32 afid, char *uname, char *aname, - u32 n_uname, int dotu); -struct p9_fcall *p9_create_tflush(u16 oldtag); -struct p9_fcall *p9_create_twalk(u32 fid, u32 newfid, u16 nwname, - char **wnames); -struct p9_fcall *p9_create_topen(u32 fid, u8 mode); -struct p9_fcall *p9_create_tcreate(u32 fid, char *name, u32 perm, u8 mode, - char *extension, int dotu); -struct p9_fcall *p9_create_tread(u32 fid, u64 offset, u32 count); -struct p9_fcall *p9_create_twrite(u32 fid, u64 offset, u32 count, - const char *data); -struct p9_fcall *p9_create_twrite_u(u32 fid, u64 offset, u32 count, - const char __user *data); -struct p9_fcall *p9_create_tclunk(u32 fid); -struct p9_fcall *p9_create_tremove(u32 fid); -struct p9_fcall *p9_create_tstat(u32 fid); -struct p9_fcall *p9_create_twstat(u32 fid, struct p9_wstat *wstat, - int dotu); - -int p9_printfcall(char *buf, int buflen, struct p9_fcall *fc, int dotu); int p9_errstr2errno(char *errstr, int len); struct p9_idpool *p9_idpool_create(void); @@ -415,5 +572,6 @@ void p9_idpool_put(int id, struct p9_idpool *p); int p9_idpool_check(int id, struct p9_idpool *p); int p9_error_init(void); -int p9_errstr2errno(char *, int); +int p9_trans_fd_init(void); +void p9_trans_fd_exit(void); #endif /* NET_9P_H */ diff --git a/include/net/9p/client.h b/include/net/9p/client.h index e52f93d9ac5..6fab66c5c5a 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -26,57 +26,247 @@ #ifndef NET_9P_CLIENT_H #define NET_9P_CLIENT_H +#include <linux/utsname.h> + +/* Number of requests per row */ +#define P9_ROW_MAXTAG 255 + +/** enum p9_proto_versions - 9P protocol versions + * @p9_proto_legacy: 9P Legacy mode, pre-9P2000.u + * @p9_proto_2000u: 9P2000.u extension + * @p9_proto_2000L: 9P2000.L extension + */ + +enum p9_proto_versions{ + p9_proto_legacy, + p9_proto_2000u, + p9_proto_2000L, +}; + + +/** + * enum p9_trans_status - different states of underlying transports + * @Connected: transport is connected and healthy + * @Disconnected: transport has been disconnected + * @Hung: transport is connected by wedged + * + * This enumeration details the various states a transport + * instatiation can be in. 
+ */ + +enum p9_trans_status { + Connected, + BeginDisconnect, + Disconnected, + Hung, +}; + +/** + * enum p9_req_status_t - status of a request + * @REQ_STATUS_IDLE: request slot unused + * @REQ_STATUS_ALLOC: request has been allocated but not sent + * @REQ_STATUS_UNSENT: request waiting to be sent + * @REQ_STATUS_SENT: request sent to server + * @REQ_STATUS_RCVD: response received from server + * @REQ_STATUS_FLSHD: request has been flushed + * @REQ_STATUS_ERROR: request encountered an error on the client side + * + * The @REQ_STATUS_IDLE state is used to mark a request slot as unused + * but use is actually tracked by the idpool structure which handles tag + * id allocation. + * + */ + +enum p9_req_status_t { + REQ_STATUS_IDLE, + REQ_STATUS_ALLOC, + REQ_STATUS_UNSENT, + REQ_STATUS_SENT, + REQ_STATUS_RCVD, + REQ_STATUS_FLSHD, + REQ_STATUS_ERROR, +}; + +/** + * struct p9_req_t - request slots + * @status: status of this request slot + * @t_err: transport error + * @flush_tag: tag of request being flushed (for flush requests) + * @wq: wait_queue for the client to block on for this request + * @tc: the request fcall structure + * @rc: the response fcall structure + * @aux: transport specific data (provided for trans_fd migration) + * @req_list: link for higher level objects to chain requests + * + * Transport use an array to track outstanding requests + * instead of a list. While this may incurr overhead during initial + * allocation or expansion, it makes request lookup much easier as the + * tag id is a index into an array. (We use tag+1 so that we can accommodate + * the -1 tag for the T_VERSION request). + * This also has the nice effect of only having to allocate wait_queues + * once, instead of constantly allocating and freeing them. Its possible + * other resources could benefit from this scheme as well. + * + */ + +struct p9_req_t { + int status; + int t_err; + wait_queue_head_t *wq; + struct p9_fcall *tc; + struct p9_fcall *rc; + void *aux; + + struct list_head req_list; +}; + +/** + * struct p9_client - per client instance state + * @lock: protect @fidlist + * @msize: maximum data size negotiated by protocol + * @dotu: extension flags negotiated by protocol + * @proto_version: 9P protocol version to use + * @trans_mod: module API instantiated with this client + * @trans: tranport instance state and API + * @fidpool: fid handle accounting for session + * @fidlist: List of active fid handles + * @tagpool - transaction id accounting for session + * @reqs - 2D array of requests + * @max_tag - current maximum tag id allocated + * @name - node name used as client id + * + * The client structure is used to keep track of various per-client + * state that has been instantiated. + * In order to minimize per-transaction overhead we use a + * simple array to lookup requests instead of a hash table + * or linked list. In order to support larger number of + * transactions, we make this a 2D array, allocating new rows + * when we need to grow the total number of the transactions. + * + * Each row is 256 requests and we'll support up to 256 rows for + * a total of 64k concurrent requests per session. + * + * Bugs: duplicated data and potentially unnecessary elements. 
+ */ + struct p9_client { spinlock_t lock; /* protect client structure */ - int msize; - unsigned char dotu; + unsigned int msize; + unsigned char proto_version; struct p9_trans_module *trans_mod; - struct p9_trans *trans; - struct p9_conn *conn; + enum p9_trans_status status; + void *trans; struct p9_idpool *fidpool; struct list_head fidlist; + + struct p9_idpool *tagpool; + struct p9_req_t *reqs[P9_ROW_MAXTAG]; + int max_tag; + + char name[__NEW_UTS_LEN + 1]; }; +/** + * struct p9_fid - file system entity handle + * @clnt: back pointer to instantiating &p9_client + * @fid: numeric identifier for this handle + * @mode: current mode of this fid (enum?) + * @qid: the &p9_qid server identifier this handle points to + * @iounit: the server reported maximum transaction size for this file + * @uid: the numeric uid of the local user who owns this handle + * @rdir: readdir accounting structure (allocated on demand) + * @flist: per-client-instance fid tracking + * @dlist: per-dentry fid tracking + * + * TODO: This needs lots of explanation. + */ + struct p9_fid { struct p9_client *clnt; u32 fid; int mode; struct p9_qid qid; u32 iounit; - uid_t uid; - void *aux; + kuid_t uid; + + void *rdir; - int rdir_fpos; - int rdir_pos; - struct p9_fcall *rdir_fcall; struct list_head flist; - struct list_head dlist; /* list of all fids attached to a dentry */ + struct hlist_node dlist; /* list of all fids attached to a dentry */ }; +/** + * struct p9_dirent - directory entry structure + * @qid: The p9 server qid for this dirent + * @d_off: offset to the next dirent + * @d_type: type of file + * @d_name: file name + */ + +struct p9_dirent { + struct p9_qid qid; + u64 d_off; + unsigned char d_type; + char d_name[256]; +}; + +int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); +int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, + const char *name); +int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, + struct p9_fid *newdirfid, const char *new_name); struct p9_client *p9_client_create(const char *dev_name, char *options); void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); +void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, - char *uname, u32 n_uname, char *aname); -struct p9_fid *p9_client_auth(struct p9_client *clnt, char *uname, - u32 n_uname, char *aname); -struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, - int clone); + char *uname, kuid_t n_uname, char *aname); +struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, + char **wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, char *extension); +int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); +int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, kgid_t gid, + struct p9_qid *qid); +int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, + kgid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); +int p9_client_fsync(struct p9_fid *fid, int datasync); int p9_client_remove(struct p9_fid *fid); -int p9_client_read(struct p9_fid *fid, char *data, u64 offset, u32 count); -int p9_client_readn(struct p9_fid *fid, char *data, u64 offset, u32 count); -int p9_client_write(struct p9_fid *fid, char *data, u64 offset, u32 count); -int p9_client_uread(struct p9_fid *fid, char 
__user *data, u64 offset, - u32 count); -int p9_client_uwrite(struct p9_fid *fid, const char __user *data, u64 offset, - u32 count); -struct p9_stat *p9_client_stat(struct p9_fid *fid); +int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags); +int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, + u64 offset, u32 count); +int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, + u64 offset, u32 count); +int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset); +int p9dirent_read(struct p9_client *clnt, char *buf, int len, + struct p9_dirent *dirent); +struct p9_wstat *p9_client_stat(struct p9_fid *fid); int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst); -struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset); +int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); + +struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, + u64 request_mask); + +int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode, + dev_t rdev, kgid_t gid, struct p9_qid *); +int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, + kgid_t gid, struct p9_qid *); +int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); +int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); +struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); +void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status); + +int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int); +int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *); +void p9stat_free(struct p9_wstat *); + +int p9_is_proto_dotu(struct p9_client *clnt); +int p9_is_proto_dotl(struct p9_client *clnt); +struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *); +int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int); +int p9_client_readlink(struct p9_fid *fid, char **target); #endif /* NET_9P_CLIENT_H */ diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index d2209ae9d18..d9fa68f26c4 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -26,32 +26,48 @@ #ifndef NET_9P_TRANSPORT_H #define NET_9P_TRANSPORT_H -enum p9_trans_status { - Connected, - Disconnected, - Hung, -}; +#define P9_DEF_MIN_RESVPORT (665U) +#define P9_DEF_MAX_RESVPORT (1023U) -struct p9_trans { - enum p9_trans_status status; - int msize; - unsigned char extended; - void *priv; - void (*close) (struct p9_trans *); - int (*rpc) (struct p9_trans *t, struct p9_fcall *tc, - struct p9_fcall **rc); -}; +/** + * struct p9_trans_module - transport module interface + * @list: used to maintain a list of currently available transports + * @name: the human-readable name of the transport + * @maxsize: transport provided maximum packet size + * @pref: Preferences of this transport + * @def: set if this transport should be considered the default + * @create: member function to create a new connection on this transport + * @close: member function to discard a connection on this transport + * @request: member function to issue a request to the transport + * @cancel: member function to cancel a request (if it hasn't been sent) + * @cancelled: member function to notify that a cancelled request will not + * not receive a reply + * + * This is the basic API for a transport module which is registered by the + * transport module with the 9P core network module and used by the client + * to instantiate a new connection on a transport. 
+ * + * The transport module list is protected by v9fs_trans_lock. + */ struct p9_trans_module { struct list_head list; char *name; /* name of transport */ int maxsize; /* max message size of transport */ int def; /* this transport should be default */ - struct p9_trans * (*create)(const char *, char *, int, unsigned char); + struct module *owner; + int (*create)(struct p9_client *, const char *, char *); + void (*close) (struct p9_client *); + int (*request) (struct p9_client *, struct p9_req_t *req); + int (*cancel) (struct p9_client *, struct p9_req_t *req); + int (*cancelled)(struct p9_client *, struct p9_req_t *req); + int (*zc_request)(struct p9_client *, struct p9_req_t *, + char *, char *, int , int, int, int); }; void v9fs_register_trans(struct p9_trans_module *m); -struct p9_trans_module *v9fs_match_trans(const substring_t *name); -struct p9_trans_module *v9fs_default_trans(void); - +void v9fs_unregister_trans(struct p9_trans_module *m); +struct p9_trans_module *v9fs_get_trans_by_name(char *s); +struct p9_trans_module *v9fs_get_default_trans(void); +void v9fs_put_trans(struct p9_trans_module *m); #endif /* NET_9P_TRANSPORT_H */ diff --git a/include/net/Space.h b/include/net/Space.h new file mode 100644 index 00000000000..8a32771e421 --- /dev/null +++ b/include/net/Space.h @@ -0,0 +1,31 @@ +/* A unified ethernet device probe. This is the easiest way to have every + * ethernet adaptor have the name "eth[0123...]". + */ + +struct net_device *hp100_probe(int unit); +struct net_device *ultra_probe(int unit); +struct net_device *wd_probe(int unit); +struct net_device *ne_probe(int unit); +struct net_device *fmv18x_probe(int unit); +struct net_device *i82596_probe(int unit); +struct net_device *ni65_probe(int unit); +struct net_device *sonic_probe(int unit); +struct net_device *smc_init(int unit); +struct net_device *atarilance_probe(int unit); +struct net_device *sun3lance_probe(int unit); +struct net_device *sun3_82586_probe(int unit); +struct net_device *apne_probe(int unit); +struct net_device *cs89x0_probe(int unit); +struct net_device *mvme147lance_probe(int unit); +struct net_device *tc515_probe(int unit); +struct net_device *lance_probe(int unit); +struct net_device *mac8390_probe(int unit); +struct net_device *mac89x0_probe(int unit); +struct net_device *cops_probe(int unit); +struct net_device *ltpc_probe(void); + +/* Fibre Channel adapters */ +int iph5526_probe(struct net_device *dev); + +/* SBNI adapters */ +int sbni_probe(int unit); diff --git a/include/net/act_api.h b/include/net/act_api.h index 565eed8fe49..3ee4c92afd1 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -9,19 +9,20 @@ #include <net/pkt_sched.h> struct tcf_common { - struct tcf_common *tcfc_next; + struct hlist_node tcfc_head; u32 tcfc_index; int tcfc_refcnt; int tcfc_bindcnt; u32 tcfc_capab; int tcfc_action; struct tcf_t tcfc_tm; - struct gnet_stats_basic tcfc_bstats; + struct gnet_stats_basic_packed tcfc_bstats; struct gnet_stats_queue tcfc_qstats; - struct gnet_stats_rate_est tcfc_rate_est; + struct gnet_stats_rate_est64 tcfc_rate_est; spinlock_t tcfc_lock; + struct rcu_head tcfc_rcu; }; -#define tcf_next common.tcfc_next +#define tcf_head common.tcfc_head #define tcf_index common.tcfc_index #define tcf_refcnt common.tcfc_refcnt #define tcf_bindcnt common.tcfc_bindcnt @@ -32,26 +33,13 @@ struct tcf_common { #define tcf_qstats common.tcfc_qstats #define tcf_rate_est common.tcfc_rate_est #define tcf_lock common.tcfc_lock - -struct tcf_police { - struct tcf_common common; - int tcfp_result; - 
u32 tcfp_ewma_rate; - u32 tcfp_burst; - u32 tcfp_mtu; - u32 tcfp_toks; - u32 tcfp_ptoks; - psched_time_t tcfp_t_c; - struct qdisc_rate_table *tcfp_R_tab; - struct qdisc_rate_table *tcfp_P_tab; -}; -#define to_police(pc) \ - container_of(pc, struct tcf_police, common) +#define tcf_rcu common.tcfc_rcu struct tcf_hashinfo { - struct tcf_common **htab; + struct hlist_head *htab; unsigned int hmask; - rwlock_t *lock; + spinlock_t lock; + u32 index; }; static inline unsigned int tcf_hash(u32 index, unsigned int hmask) @@ -59,66 +47,80 @@ static inline unsigned int tcf_hash(u32 index, unsigned int hmask) return index & hmask; } +static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask) +{ + int i; + + spin_lock_init(&hf->lock); + hf->index = 0; + hf->hmask = mask; + hf->htab = kzalloc((mask + 1) * sizeof(struct hlist_head), + GFP_KERNEL); + if (!hf->htab) + return -ENOMEM; + for (i = 0; i < mask + 1; i++) + INIT_HLIST_HEAD(&hf->htab[i]); + return 0; +} + +static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf) +{ + kfree(hf->htab); +} + #ifdef CONFIG_NET_CLS_ACT #define ACT_P_CREATED 1 #define ACT_P_DELETED 1 -struct tcf_act_hdr { - struct tcf_common common; -}; - struct tc_action { void *priv; - struct tc_action_ops *ops; + const struct tc_action_ops *ops; __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 order; - struct tc_action *next; + struct list_head list; }; -#define TCA_CAP_NONE 0 struct tc_action_ops { - struct tc_action_ops *next; + struct list_head head; struct tcf_hashinfo *hinfo; char kind[IFNAMSIZ]; __u32 type; /* TBD to match kind */ - __u32 capab; /* capabilities includes 4 bit version */ struct module *owner; - int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *); - int (*get_stats)(struct sk_buff *, struct tc_action *); + int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); - int (*cleanup)(struct tc_action *, int bind); + void (*cleanup)(struct tc_action *, int bind); int (*lookup)(struct tc_action *, u32); - int (*init)(struct nlattr *, struct nlattr *, struct tc_action *, int , int); + int (*init)(struct net *net, struct nlattr *nla, + struct nlattr *est, struct tc_action *act, int ovr, + int bind); int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); }; -extern struct tcf_common *tcf_hash_lookup(u32 index, - struct tcf_hashinfo *hinfo); -extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo); -extern int tcf_hash_release(struct tcf_common *p, int bind, - struct tcf_hashinfo *hinfo); -extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, - int type, struct tc_action *a); -extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo); -extern int tcf_hash_search(struct tc_action *a, u32 index); -extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, - int bind, struct tcf_hashinfo *hinfo); -extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, - struct tc_action *a, int size, - int bind, u32 *idx_gen, - struct tcf_hashinfo *hinfo); -extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo); +int tcf_hash_search(struct tc_action *a, u32 index); +void tcf_hash_destroy(struct tc_action *a); +int tcf_hash_release(struct tc_action *a, int bind); +u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); +int tcf_hash_check(u32 index, struct tc_action *a, int bind); +int tcf_hash_create(u32 index, 
struct nlattr *est, struct tc_action *a, + int size, int bind); +void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); +void tcf_hash_insert(struct tc_action *a); -extern int tcf_register_action(struct tc_action_ops *a); -extern int tcf_unregister_action(struct tc_action_ops *a); -extern void tcf_action_destroy(struct tc_action *a, int bind); -extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res); -extern struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind); -extern struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind); -extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int); -extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); -extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); -extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int); +int tcf_register_action(struct tc_action_ops *a, unsigned int mask); +int tcf_unregister_action(struct tc_action_ops *a); +int tcf_action_destroy(struct list_head *actions, int bind); +int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, + struct tcf_result *res); +int tcf_action_init(struct net *net, struct nlattr *nla, + struct nlattr *est, char *n, int ovr, + int bind, struct list_head *); +struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, + struct nlattr *est, char *n, int ovr, + int bind); +int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); +int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); +int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); +int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); #endif /* CONFIG_NET_CLS_ACT */ #endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 496503c0384..f679877bb60 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -1,8 +1,6 @@ #ifndef _ADDRCONF_H #define _ADDRCONF_H -#define RETRANS_TIMER HZ - #define MAX_RTR_SOLICITATIONS 3 #define RTR_SOLICITATION_INTERVAL (4*HZ) @@ -10,13 +8,17 @@ #define TEMP_VALID_LIFETIME (7*86400) #define TEMP_PREFERRED_LIFETIME (86400) -#define REGEN_MAX_RETRY (5) +#define REGEN_MAX_RETRY (3) #define MAX_DESYNC_FACTOR (600) #define ADDR_CHECK_FREQUENCY (120*HZ) #define IPV6_MAX_ADDRESSES 16 +#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? 
HZ / 50 : 1) +#define ADDRCONF_TIMER_FUZZ (HZ / 4) +#define ADDRCONF_TIMER_FUZZ_MAX (HZ) + #include <linux/in.h> #include <linux/in6.h> @@ -44,136 +46,235 @@ struct prefix_info { }; -#ifdef __KERNEL__ - #include <linux/netdevice.h> #include <net/if_inet6.h> #include <net/ipv6.h> -#define IN6_ADDR_HSIZE 16 +#define IN6_ADDR_HSIZE_SHIFT 4 +#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT) -extern int addrconf_init(void); -extern void addrconf_cleanup(void); +int addrconf_init(void); +void addrconf_cleanup(void); -extern int addrconf_add_ifaddr(void __user *arg); -extern int addrconf_del_ifaddr(void __user *arg); -extern int addrconf_set_dstaddr(void __user *arg); +int addrconf_add_ifaddr(struct net *net, void __user *arg); +int addrconf_del_ifaddr(struct net *net, void __user *arg); +int addrconf_set_dstaddr(struct net *net, void __user *arg); -extern int ipv6_chk_addr(struct net *net, - struct in6_addr *addr, - struct net_device *dev, - int strict); +int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, int strict); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) -extern int ipv6_chk_home_addr(struct net *net, - struct in6_addr *addr); +int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); #endif -extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, - struct in6_addr *addr, - struct net_device *dev, - int strict); - -extern int ipv6_get_saddr(struct dst_entry *dst, - struct in6_addr *daddr, - struct in6_addr *saddr); -extern int ipv6_dev_get_saddr(struct net_device *dev, - struct in6_addr *daddr, - struct in6_addr *saddr); -extern int ipv6_get_lladdr(struct net_device *dev, - struct in6_addr *addr, - unsigned char banned_flags); -extern int ipv6_rcv_saddr_equal(const struct sock *sk, - const struct sock *sk2); -extern void addrconf_join_solict(struct net_device *dev, - struct in6_addr *addr); -extern void addrconf_leave_solict(struct inet6_dev *idev, - struct in6_addr *addr); + +bool ipv6_chk_custom_prefix(const struct in6_addr *addr, + const unsigned int prefix_len, + struct net_device *dev); + +int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); + +struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, + const struct in6_addr *addr, + struct net_device *dev, int strict); + +int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, + const struct in6_addr *daddr, unsigned int srcprefs, + struct in6_addr *saddr); +int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, + u32 banned_flags); +int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, + u32 banned_flags); +int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2); +void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); +void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); + +static inline unsigned long addrconf_timeout_fixup(u32 timeout, + unsigned int unit) +{ + if (timeout == 0xffffffff) + return ~0UL; + + /* + * Avoid arithmetic overflow. + * Assuming unit is constant and non-zero, this "if" statement + * will go away on 64bit archs. 
+ */ + if (0xfffffffe > LONG_MAX / unit && timeout > LONG_MAX / unit) + return LONG_MAX / unit; + + return timeout; +} + +static inline int addrconf_finite_timeout(unsigned long timeout) +{ + return ~timeout; +} /* * IPv6 Address Label subsystem (addrlabel.c) */ -extern int ipv6_addr_label_init(void); -extern void ipv6_addr_label_rtnl_register(void); -extern u32 ipv6_addr_label(const struct in6_addr *addr, - int type, int ifindex); +int ipv6_addr_label_init(void); +void ipv6_addr_label_cleanup(void); +void ipv6_addr_label_rtnl_register(void); +u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr, + int type, int ifindex); /* * multicast prototypes (mcast.c) */ -extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, - struct in6_addr *addr); -extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex, - struct in6_addr *addr); -extern void ipv6_sock_mc_close(struct sock *sk); -extern int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr, - struct in6_addr *src_addr); - -extern int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr); -extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr); -extern int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr); -extern void ipv6_mc_up(struct inet6_dev *idev); -extern void ipv6_mc_down(struct inet6_dev *idev); -extern void ipv6_mc_init_dev(struct inet6_dev *idev); -extern void ipv6_mc_destroy_dev(struct inet6_dev *idev); -extern void addrconf_dad_failure(struct inet6_ifaddr *ifp); - -extern int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group, - struct in6_addr *src_addr); -extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); - -extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); - -extern int ipv6_get_hoplimit(struct net_device *dev); +int ipv6_sock_mc_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int ipv6_sock_mc_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); +void ipv6_sock_mc_close(struct sock *sk); +bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, + const struct in6_addr *src_addr); + +int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr); +int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr); +void ipv6_mc_up(struct inet6_dev *idev); +void ipv6_mc_down(struct inet6_dev *idev); +void ipv6_mc_unmap(struct inet6_dev *idev); +void ipv6_mc_remap(struct inet6_dev *idev); +void ipv6_mc_init_dev(struct inet6_dev *idev); +void ipv6_mc_destroy_dev(struct inet6_dev *idev); +void addrconf_dad_failure(struct inet6_ifaddr *ifp); + +bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, + const struct in6_addr *src_addr); + +void ipv6_mc_dad_complete(struct inet6_dev *idev); + +/* A stub used by vxlan module. This is ugly, ideally these + * symbols should be built into the core kernel. 
+ */ +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex, + const struct in6_addr *addr); + int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, + const struct in6_addr *addr); + int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, + struct flowi6 *fl6); + void (*udpv6_encap_enable)(void); + void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh, + const struct in6_addr *daddr, + const struct in6_addr *solicited_addr, + bool router, bool solicited, bool override, bool inc_opt); + struct neigh_table *nd_tbl; +}; +extern const struct ipv6_stub *ipv6_stub __read_mostly; /* - * anycast prototypes (anycast.c) + * identify MLD packets for MLD filter exceptions */ -extern int ipv6_sock_ac_join(struct sock *sk,int ifindex,struct in6_addr *addr); -extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex,struct in6_addr *addr); -extern void ipv6_sock_ac_close(struct sock *sk); -extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex); +static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset) +{ + struct icmp6hdr *hdr; + + if (nexthdr != IPPROTO_ICMPV6 || + !pskb_network_may_pull(skb, offset + sizeof(struct icmp6hdr))) + return false; + + hdr = (struct icmp6hdr *)(skb_network_header(skb) + offset); + + switch (hdr->icmp6_type) { + case ICMPV6_MGM_QUERY: + case ICMPV6_MGM_REPORT: + case ICMPV6_MGM_REDUCTION: + case ICMPV6_MLD2_REPORT: + return true; + default: + break; + } + return false; +} -extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr); -extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr); -extern int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr); +void addrconf_prefix_rcv(struct net_device *dev, + u8 *opt, int len, bool sllao); +/* + * anycast prototypes (anycast.c) + */ +int ipv6_sock_ac_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int ipv6_sock_ac_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); +void ipv6_sock_ac_close(struct sock *sk); + +int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); +bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, + const struct in6_addr *addr); +bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, + const struct in6_addr *addr); /* Device notifier */ -extern int register_inet6addr_notifier(struct notifier_block *nb); -extern int unregister_inet6addr_notifier(struct notifier_block *nb); - -static inline struct inet6_dev * -__in6_dev_get(struct net_device *dev) +int register_inet6addr_notifier(struct notifier_block *nb); +int unregister_inet6addr_notifier(struct notifier_block *nb); +int inet6addr_notifier_call_chain(unsigned long val, void *v); + +void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, + struct ipv6_devconf *devconf); + +/** + * __in6_dev_get - get inet6_dev pointer from netdevice + * @dev: network device + * + * Caller must hold rcu_read_lock or RTNL, because this function + * does not take a reference on the inet6_dev. 
+ */ +static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev) { - return rcu_dereference(dev->ip6_ptr); + return rcu_dereference_rtnl(dev->ip6_ptr); } -static inline struct inet6_dev * -in6_dev_get(struct net_device *dev) +/** + * in6_dev_get - get inet6_dev pointer from netdevice + * @dev: network device + * + * This version can be used in any context, and takes a reference + * on the inet6_dev. Callers must use in6_dev_put() later to + * release this reference. + */ +static inline struct inet6_dev *in6_dev_get(const struct net_device *dev) { - struct inet6_dev *idev = NULL; + struct inet6_dev *idev; + rcu_read_lock(); - idev = __in6_dev_get(dev); + idev = rcu_dereference(dev->ip6_ptr); if (idev) atomic_inc(&idev->refcnt); rcu_read_unlock(); return idev; } -extern void in6_dev_finish_destroy(struct inet6_dev *idev); +static inline struct neigh_parms *__in6_dev_nd_parms_get_rcu(const struct net_device *dev) +{ + struct inet6_dev *idev = __in6_dev_get(dev); + + return idev ? idev->nd_parms : NULL; +} + +void in6_dev_finish_destroy(struct inet6_dev *idev); -static inline void -in6_dev_put(struct inet6_dev *idev) +static inline void in6_dev_put(struct inet6_dev *idev) { if (atomic_dec_and_test(&idev->refcnt)) in6_dev_finish_destroy(idev); } -#define __in6_dev_put(idev) atomic_dec(&(idev)->refcnt) -#define in6_dev_hold(idev) atomic_inc(&(idev)->refcnt) +static inline void __in6_dev_put(struct inet6_dev *idev) +{ + atomic_dec(&idev->refcnt); +} +static inline void in6_dev_hold(struct inet6_dev *idev) +{ + atomic_inc(&idev->refcnt); +} -extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); +void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); static inline void in6_ifa_put(struct inet6_ifaddr *ifp) { @@ -181,31 +282,17 @@ static inline void in6_ifa_put(struct inet6_ifaddr *ifp) inet6_ifa_finish_destroy(ifp); } -#define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt) -#define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt) - - -extern void addrconf_forwarding_on(void); -/* - * Hash function taken from net_alias.c - */ - -static __inline__ u8 ipv6_addr_hash(const struct in6_addr *addr) -{ - __u32 word; - - /* - * We perform the hash function over the last 64 bits of the address - * This will include the IEEE address token on links that support it. 
- */ - - word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); - word ^= (word >> 16); - word ^= (word >> 8); +static inline void __in6_ifa_put(struct inet6_ifaddr *ifp) +{ + atomic_dec(&ifp->refcnt); +} - return ((word ^ (word >> 4)) & 0x0f); +static inline void in6_ifa_hold(struct inet6_ifaddr *ifp) +{ + atomic_inc(&ifp->refcnt); } + /* * compute link-local solicited-node multicast address */ @@ -214,71 +301,58 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr, struct in6_addr *solicited) { ipv6_addr_set(solicited, - __constant_htonl(0xFF020000), 0, - __constant_htonl(0x1), - __constant_htonl(0xFF000000) | addr->s6_addr32[3]); -} - - -static inline void ipv6_addr_all_nodes(struct in6_addr *addr) -{ - ipv6_addr_set(addr, - __constant_htonl(0xFF020000), 0, 0, - __constant_htonl(0x1)); -} - -static inline void ipv6_addr_all_routers(struct in6_addr *addr) -{ - ipv6_addr_set(addr, - __constant_htonl(0xFF020000), 0, 0, - __constant_htonl(0x2)); -} - -static inline int ipv6_addr_is_multicast(const struct in6_addr *addr) -{ - return (addr->s6_addr32[0] & __constant_htonl(0xFF000000)) == __constant_htonl(0xFF000000); + htonl(0xFF020000), 0, + htonl(0x1), + htonl(0xFF000000) | addr->s6_addr32[3]); } -static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) +static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) { - return (addr->s6_addr32[0] == htonl(0xff020000) && - addr->s6_addr32[1] == 0 && - addr->s6_addr32[2] == 0 && - addr->s6_addr32[3] == htonl(0x00000001)); +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + __be64 *p = (__be64 *)addr; + return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL; +#else + return ((addr->s6_addr32[0] ^ htonl(0xff020000)) | + addr->s6_addr32[1] | addr->s6_addr32[2] | + (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0; +#endif } -static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) +static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) { - return (addr->s6_addr32[0] == htonl(0xff020000) && - addr->s6_addr32[1] == 0 && - addr->s6_addr32[2] == 0 && - addr->s6_addr32[3] == htonl(0x00000002)); +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + __be64 *p = (__be64 *)addr; + return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL; +#else + return ((addr->s6_addr32[0] ^ htonl(0xff020000)) | + addr->s6_addr32[1] | addr->s6_addr32[2] | + (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0; +#endif } -static inline int ipv6_isatap_eui64(u8 *eui, __be32 addr) +static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr) { - eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) || - ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) || - ipv4_is_private_172(addr) || ipv4_is_test_192(addr) || - ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) || - ipv4_is_test_198(addr) || ipv4_is_multicast(addr) || - ipv4_is_lbcast(addr)) ? 
0x00 : 0x02; - eui[1] = 0; - eui[2] = 0x5E; - eui[3] = 0xFE; - memcpy (eui+4, &addr, 4); - return 0; + return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE); } -static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) +static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr) { - return ((addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE)); +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + __be64 *p = (__be64 *)addr; + return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | + ((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) & + cpu_to_be64(0xffffffffff000000UL))) == 0UL; +#else + return ((addr->s6_addr32[0] ^ htonl(0xff020000)) | + addr->s6_addr32[1] | + (addr->s6_addr32[2] ^ htonl(0x00000001)) | + (addr->s6_addr[12] ^ 0xff)) == 0; +#endif } #ifdef CONFIG_PROC_FS -extern int if6_proc_init(void); -extern void if6_proc_exit(void); +int if6_proc_init(void); +void if6_proc_exit(void); #endif #endif -#endif diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h new file mode 100644 index 00000000000..085940f7eee --- /dev/null +++ b/include/net/af_ieee802154.h @@ -0,0 +1,70 @@ +/* + * IEEE 802.15.4 interface for userspace + * + * Copyright 2007, 2008 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Written by: + * Sergey Lapin <slapin@ossfans.org> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + */ + +#ifndef _AF_IEEE802154_H +#define _AF_IEEE802154_H + +#include <linux/socket.h> /* for sa_family_t */ + +enum { + IEEE802154_ADDR_NONE = 0x0, + /* RESERVED = 0x01, */ + IEEE802154_ADDR_SHORT = 0x2, /* 16-bit address + PANid */ + IEEE802154_ADDR_LONG = 0x3, /* 64-bit address + PANid */ +}; + +/* address length, octets */ +#define IEEE802154_ADDR_LEN 8 + +struct ieee802154_addr_sa { + int addr_type; + u16 pan_id; + union { + u8 hwaddr[IEEE802154_ADDR_LEN]; + u16 short_addr; + }; +}; + +#define IEEE802154_PANID_BROADCAST 0xffff +#define IEEE802154_ADDR_BROADCAST 0xffff +#define IEEE802154_ADDR_UNDEF 0xfffe + +struct sockaddr_ieee802154 { + sa_family_t family; /* AF_IEEE802154 */ + struct ieee802154_addr_sa addr; +}; + +/* get/setsockopt */ +#define SOL_IEEE802154 0 + +#define WPAN_WANTACK 0 +#define WPAN_SECURITY 1 +#define WPAN_SECURITY_LEVEL 2 + +#define WPAN_SECURITY_DEFAULT 0 +#define WPAN_SECURITY_OFF 1 +#define WPAN_SECURITY_ON 2 + +#define WPAN_SECURITY_LEVEL_DEFAULT (-1) + +#endif diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 00c2eaa07c2..e797d45a5ae 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -12,8 +12,6 @@ #ifndef _NET_RXRPC_H #define _NET_RXRPC_H -#ifdef __KERNEL__ - #include <linux/rxrpc.h> struct rxrpc_call; @@ -33,25 +31,21 @@ enum { typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long, struct sk_buff *); -extern void rxrpc_kernel_intercept_rx_messages(struct socket *, - rxrpc_interceptor_t); -extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, - struct sockaddr_rxrpc *, - struct key *, - unsigned long, - gfp_t); -extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, - size_t); -extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); -extern void rxrpc_kernel_end_call(struct rxrpc_call *); -extern bool rxrpc_kernel_is_data_last(struct sk_buff *); -extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *); -extern int rxrpc_kernel_get_error_number(struct sk_buff *); -extern void rxrpc_kernel_data_delivered(struct sk_buff *); -extern void rxrpc_kernel_free_skb(struct sk_buff *); -extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, - unsigned long); -extern int rxrpc_kernel_reject_call(struct socket *); +void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t); +struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, + struct sockaddr_rxrpc *, + struct key *, + unsigned long, + gfp_t); +int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t); +void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); +void rxrpc_kernel_end_call(struct rxrpc_call *); +bool rxrpc_kernel_is_data_last(struct sk_buff *); +u32 rxrpc_kernel_get_abort_code(struct sk_buff *); +int rxrpc_kernel_get_error_number(struct sk_buff *); +void rxrpc_kernel_data_delivered(struct sk_buff *); +void rxrpc_kernel_free_skb(struct sk_buff *); +struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long); +int rxrpc_kernel_reject_call(struct socket *); -#endif /* __KERNEL__ */ #endif /* _NET_RXRPC_H */ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 2dfa96b0575..a175ba4a7ad 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -6,31 +6,39 @@ #include <linux/mutex.h> #include <net/sock.h> -extern void unix_inflight(struct file *fp); -extern void unix_notinflight(struct file *fp); -extern void unix_gc(void); +void 
unix_inflight(struct file *fp); +void unix_notinflight(struct file *fp); +void unix_gc(void); +void wait_for_unix_gc(void); +struct sock *unix_get_socket(struct file *filp); +struct sock *unix_peer_get(struct sock *); #define UNIX_HASH_SIZE 256 +#define UNIX_HASH_BITS 8 extern unsigned int unix_tot_inflight; +extern spinlock_t unix_table_lock; +extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; struct unix_address { atomic_t refcnt; int len; - unsigned hash; + unsigned int hash; struct sockaddr_un name[0]; }; struct unix_skb_parms { - struct ucred creds; /* Skb credentials */ + struct pid *pid; /* Skb credentials */ + kuid_t uid; + kgid_t gid; struct scm_fp_list *fp; /* Passed files */ #ifdef CONFIG_SECURITY_NETWORK u32 secid; /* Security ID */ #endif + u32 consumed; }; -#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) -#define UNIXCREDS(skb) (&UNIXCB((skb)).creds) +#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) #define UNIXSID(skb) (&UNIXCB((skb)).secid) #define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) @@ -39,31 +47,35 @@ struct unix_skb_parms { spin_lock_nested(&unix_sk(s)->lock, \ SINGLE_DEPTH_NESTING) -#ifdef __KERNEL__ /* The AF_UNIX socket */ struct unix_sock { /* WARNING: sk has to be the first member */ struct sock sk; - struct unix_address *addr; - struct dentry *dentry; - struct vfsmount *mnt; + struct unix_address *addr; + struct path path; struct mutex readlock; - struct sock *peer; - struct sock *other; + struct sock *peer; struct list_head link; - atomic_t inflight; - spinlock_t lock; - unsigned int gc_candidate : 1; - wait_queue_head_t peer_wait; + atomic_long_t inflight; + spinlock_t lock; + unsigned char recursion_level; + unsigned long gc_flags; +#define UNIX_GC_CANDIDATE 0 +#define UNIX_GC_MAYBE_CYCLE 1 + struct socket_wq peer_wq; }; #define unix_sk(__sk) ((struct unix_sock *)__sk) +#define peer_wait peer_wq.wait + +long unix_inq_len(struct sock *sk); +long unix_outq_len(struct sock *sk); + #ifdef CONFIG_SYSCTL -extern int unix_sysctl_register(struct net *net); -extern void unix_sysctl_unregister(struct net *net); +int unix_sysctl_register(struct net *net); +void unix_sysctl_unregister(struct net *net); #else static inline int unix_sysctl_register(struct net *net) { return 0; } static inline void unix_sysctl_unregister(struct net *net) {} #endif #endif -#endif diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h new file mode 100644 index 00000000000..42827786940 --- /dev/null +++ b/include/net/af_vsock.h @@ -0,0 +1,179 @@ +/* + * VMware vSockets Driver + * + * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __AF_VSOCK_H__ +#define __AF_VSOCK_H__ + +#include <linux/kernel.h> +#include <linux/workqueue.h> +#include <linux/vm_sockets.h> + +#include "vsock_addr.h" + +#define LAST_RESERVED_PORT 1023 + +#define vsock_sk(__sk) ((struct vsock_sock *)__sk) +#define sk_vsock(__vsk) (&(__vsk)->sk) + +struct vsock_sock { + /* sk must be the first member. 
*/ + struct sock sk; + struct sockaddr_vm local_addr; + struct sockaddr_vm remote_addr; + /* Links for the global tables of bound and connected sockets. */ + struct list_head bound_table; + struct list_head connected_table; + /* Accessed without the socket lock held. This means it can never be + * modified outside of socket create or destruct. + */ + bool trusted; + bool cached_peer_allow_dgram; /* Dgram communication allowed to + * cached peer? + */ + u32 cached_peer; /* Context ID of last dgram destination check. */ + const struct cred *owner; + /* Rest are SOCK_STREAM only. */ + long connect_timeout; + /* Listening socket that this came from. */ + struct sock *listener; + /* Used for pending list and accept queue during connection handshake. + * The listening socket is the head for both lists. Sockets created + * for connection requests are placed in the pending list until they + * are connected, at which point they are put in the accept queue list + * so they can be accepted in accept(). If accept() cannot accept the + * connection, it is marked as rejected so the cleanup function knows + * to clean up the socket. + */ + struct list_head pending_links; + struct list_head accept_queue; + bool rejected; + struct delayed_work dwork; + u32 peer_shutdown; + bool sent_request; + bool ignore_connecting_rst; + + /* Private to transport. */ + void *trans; +}; + +s64 vsock_stream_has_data(struct vsock_sock *vsk); +s64 vsock_stream_has_space(struct vsock_sock *vsk); +void vsock_pending_work(struct work_struct *work); +struct sock *__vsock_create(struct net *net, + struct socket *sock, + struct sock *parent, + gfp_t priority, unsigned short type); + +/**** TRANSPORT ****/ + +struct vsock_transport_recv_notify_data { + u64 data1; /* Transport-defined. */ + u64 data2; /* Transport-defined. */ + bool notify_on_block; +}; + +struct vsock_transport_send_notify_data { + u64 data1; /* Transport-defined. */ + u64 data2; /* Transport-defined. */ +}; + +struct vsock_transport { + /* Initialize/tear-down socket. */ + int (*init)(struct vsock_sock *, struct vsock_sock *); + void (*destruct)(struct vsock_sock *); + void (*release)(struct vsock_sock *); + + /* Connections. */ + int (*connect)(struct vsock_sock *); + + /* DGRAM. */ + int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); + int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, + struct msghdr *msg, size_t len, int flags); + int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, + struct iovec *, size_t len); + bool (*dgram_allow)(u32 cid, u32 port); + + /* STREAM. */ + /* TODO: stream_bind() */ + ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *, + size_t len, int flags); + ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *, + size_t len); + s64 (*stream_has_data)(struct vsock_sock *); + s64 (*stream_has_space)(struct vsock_sock *); + u64 (*stream_rcvhiwat)(struct vsock_sock *); + bool (*stream_is_active)(struct vsock_sock *); + bool (*stream_allow)(u32 cid, u32 port); + + /* Notification.
*/ + int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); + int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); + int (*notify_recv_init)(struct vsock_sock *, size_t, + struct vsock_transport_recv_notify_data *); + int (*notify_recv_pre_block)(struct vsock_sock *, size_t, + struct vsock_transport_recv_notify_data *); + int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t, + struct vsock_transport_recv_notify_data *); + int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t, + ssize_t, bool, struct vsock_transport_recv_notify_data *); + int (*notify_send_init)(struct vsock_sock *, + struct vsock_transport_send_notify_data *); + int (*notify_send_pre_block)(struct vsock_sock *, + struct vsock_transport_send_notify_data *); + int (*notify_send_pre_enqueue)(struct vsock_sock *, + struct vsock_transport_send_notify_data *); + int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t, + struct vsock_transport_send_notify_data *); + + /* Shutdown. */ + int (*shutdown)(struct vsock_sock *, int); + + /* Buffer sizes. */ + void (*set_buffer_size)(struct vsock_sock *, u64); + void (*set_min_buffer_size)(struct vsock_sock *, u64); + void (*set_max_buffer_size)(struct vsock_sock *, u64); + u64 (*get_buffer_size)(struct vsock_sock *); + u64 (*get_min_buffer_size)(struct vsock_sock *); + u64 (*get_max_buffer_size)(struct vsock_sock *); + + /* Addressing. */ + u32 (*get_local_cid)(void); +}; + +/**** CORE ****/ + +int __vsock_core_init(const struct vsock_transport *t, struct module *owner); +static inline int vsock_core_init(const struct vsock_transport *t) +{ + return __vsock_core_init(t, THIS_MODULE); +} +void vsock_core_exit(void); + +/**** UTILS ****/ + +void vsock_release_pending(struct sock *pending); +void vsock_add_pending(struct sock *listener, struct sock *pending); +void vsock_remove_pending(struct sock *listener, struct sock *pending); +void vsock_enqueue_accept(struct sock *listener, struct sock *connected); +void vsock_insert_connected(struct vsock_sock *vsk); +void vsock_remove_bound(struct vsock_sock *vsk); +void vsock_remove_connected(struct vsock_sock *vsk); +struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr); +struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, + struct sockaddr_vm *dst); +void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); + +#endif /* __AF_VSOCK_H__ */ diff --git a/include/net/ah.h b/include/net/ah.h index ae1c322f424..ca95b98969d 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -1,43 +1,20 @@ #ifndef _NET_AH_H #define _NET_AH_H -#include <linux/crypto.h> -#include <net/xfrm.h> +#include <linux/skbuff.h> /* This is the maximum truncated ICV length that we know of. 
*/ -#define MAX_AH_AUTH_LEN 12 +#define MAX_AH_AUTH_LEN 64 -struct ah_data -{ - u8 *work_icv; +struct crypto_ahash; + +struct ah_data { int icv_full_len; int icv_trunc_len; - struct crypto_hash *tfm; + struct crypto_ahash *ahash; }; -static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb, - u8 *auth_data) -{ - struct hash_desc desc; - int err; - - desc.tfm = ahp->tfm; - desc.flags = 0; - - memset(auth_data, 0, ahp->icv_trunc_len); - err = crypto_hash_init(&desc); - if (unlikely(err)) - goto out; - err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update); - if (unlikely(err)) - goto out; - err = crypto_hash_final(&desc, ahp->work_icv); - -out: - return err; -} - struct ip_auth_hdr; static inline struct ip_auth_hdr *ip_auth_hdr(const struct sk_buff *skb) diff --git a/include/net/arp.h b/include/net/arp.h index c236270ec95..73c49864076 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -3,29 +3,64 @@ #define _ARP_H #include <linux/if_arp.h> +#include <linux/hash.h> #include <net/neighbour.h> extern struct neigh_table arp_tbl; -extern void arp_init(void); -extern int arp_find(unsigned char *haddr, struct sk_buff *skb); -extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); -extern void arp_send(int type, int ptype, __be32 dest_ip, - struct net_device *dev, __be32 src_ip, - const unsigned char *dest_hw, - const unsigned char *src_hw, const unsigned char *th); -extern int arp_bind_neighbour(struct dst_entry *dst); -extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); -extern void arp_ifdown(struct net_device *dev); - -extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, - struct net_device *dev, __be32 src_ip, - const unsigned char *dest_hw, - const unsigned char *src_hw, - const unsigned char *target_hw); -extern void arp_xmit(struct sk_buff *skb); - -extern struct neigh_ops arp_broken_ops; +static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd) +{ + u32 val = key ^ hash32_ptr(dev); + + return val * hash_rnd; +} + +static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) +{ + struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht); + struct neighbour *n; + u32 hash_val; + + hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift); + for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); + n != NULL; + n = rcu_dereference_bh(n->next)) { + if (n->dev == dev && *(u32 *)n->primary_key == key) + return n; + } + + return NULL; +} + +static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key) +{ + struct neighbour *n; + + rcu_read_lock_bh(); + n = __ipv4_neigh_lookup_noref(dev, key); + if (n && !atomic_inc_not_zero(&n->refcnt)) + n = NULL; + rcu_read_unlock_bh(); + + return n; +} + +void arp_init(void); +int arp_find(unsigned char *haddr, struct sk_buff *skb); +int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); +void arp_send(int type, int ptype, __be32 dest_ip, + struct net_device *dev, __be32 src_ip, + const unsigned char *dest_hw, + const unsigned char *src_hw, const unsigned char *th); +int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); +void arp_ifdown(struct net_device *dev); + +struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, + struct net_device *dev, __be32 src_ip, + const unsigned char *dest_hw, + const unsigned char *src_hw, + const unsigned char *target_hw); +void arp_xmit(struct sk_buff *skb); #endif /* _ARP_H */ diff --git 
a/include/net/atmclip.h b/include/net/atmclip.h index b5a51a7bb36..5865924d4aa 100644 --- a/include/net/atmclip.h +++ b/include/net/atmclip.h @@ -15,7 +15,6 @@ #define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back)) -#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key) struct sk_buff; @@ -36,27 +35,18 @@ struct clip_vcc { struct atmarp_entry { - __be32 ip; /* IP address */ struct clip_vcc *vccs; /* active VCCs; NULL if resolution is pending */ unsigned long expires; /* entry expiration time */ struct neighbour *neigh; /* neighbour back-pointer */ }; - #define PRIV(dev) ((struct clip_priv *) netdev_priv(dev)) - struct clip_priv { int number; /* for convenience ... */ spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ - struct net_device_stats stats; struct net_device *next; /* next CLIP interface */ }; - -#ifdef __KERNEL__ -extern struct neigh_table *clip_tbl_hook; -#endif - #endif diff --git a/include/net/ax25.h b/include/net/ax25.h index 717e2192d52..bf0396e9a5d 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -10,7 +10,8 @@ #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/list.h> -#include <asm/atomic.h> +#include <linux/slab.h> +#include <linux/atomic.h> #define AX25_T1CLAMPLO 1 #define AX25_T1CLAMPHI (30 * HZ) @@ -156,12 +157,12 @@ enum { typedef struct ax25_uid_assoc { struct hlist_node uid_node; atomic_t refcount; - uid_t uid; + kuid_t uid; ax25_address call; } ax25_uid_assoc; -#define ax25_uid_for_each(__ax25, node, list) \ - hlist_for_each_entry(__ax25, node, list, uid_node) +#define ax25_uid_for_each(__ax25, list) \ + hlist_for_each_entry(__ax25, list, uid_node) #define ax25_uid_hold(ax25) \ atomic_inc(&((ax25)->refcount)) @@ -194,7 +195,7 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) atomic_inc(&ax25_rt->refcount); } -extern void __ax25_put_route(ax25_route *ax25_rt); +void __ax25_put_route(ax25_route *ax25_rt); static inline void ax25_put_route(ax25_route *ax25_rt) { @@ -214,7 +215,7 @@ typedef struct ax25_dev { struct ax25_dev *next; struct net_device *dev; struct net_device *forward; - struct ctl_table *systable; + struct ctl_table_header *sysheader; int values[AX25_MAX_VALUES]; #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; @@ -246,8 +247,8 @@ typedef struct ax25_cb { #define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo) -#define ax25_for_each(__ax25, node, list) \ - hlist_for_each_entry(__ax25, node, list, ax25_node) +#define ax25_for_each(__ax25, list) \ + hlist_for_each_entry(__ax25, list, ax25_node) #define ax25_cb_hold(__ax25) \ atomic_inc(&((__ax25)->refcount)) @@ -271,30 +272,31 @@ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev /* af_ax25.c */ extern struct hlist_head ax25_list; extern spinlock_t ax25_list_lock; -extern void ax25_cb_add(ax25_cb *); +void ax25_cb_add(ax25_cb *); struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int); struct sock *ax25_get_socket(ax25_address *, ax25_address *, int); -extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *); -extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int); -extern void ax25_destroy_socket(ax25_cb *); -extern ax25_cb * __must_check ax25_create_cb(void); -extern void ax25_fillin_cb(ax25_cb *, ax25_dev *); -extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *); +ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, + struct net_device *); +void 
ax25_send_to_raw(ax25_address *, struct sk_buff *, int); +void ax25_destroy_socket(ax25_cb *); +ax25_cb * __must_check ax25_create_cb(void); +void ax25_fillin_cb(ax25_cb *, ax25_dev *); +struct sock *ax25_make_new(struct sock *, struct ax25_dev *); /* ax25_addr.c */ extern const ax25_address ax25_bcast; extern const ax25_address ax25_defaddr; extern const ax25_address null_ax25_address; -extern char *ax2asc(char *buf, const ax25_address *); -extern void asc2ax(ax25_address *addr, const char *callsign); -extern int ax25cmp(const ax25_address *, const ax25_address *); -extern int ax25digicmp(const ax25_digi *, const ax25_digi *); -extern const unsigned char *ax25_addr_parse(const unsigned char *, int, +char *ax2asc(char *buf, const ax25_address *); +void asc2ax(ax25_address *addr, const char *callsign); +int ax25cmp(const ax25_address *, const ax25_address *); +int ax25digicmp(const ax25_digi *, const ax25_digi *); +const unsigned char *ax25_addr_parse(const unsigned char *, int, ax25_address *, ax25_address *, ax25_digi *, int *, int *); -extern int ax25_addr_build(unsigned char *, const ax25_address *, - const ax25_address *, const ax25_digi *, int, int); -extern int ax25_addr_size(const ax25_digi *); -extern void ax25_digi_invert(const ax25_digi *, ax25_digi *); +int ax25_addr_build(unsigned char *, const ax25_address *, + const ax25_address *, const ax25_digi *, int, int); +int ax25_addr_size(const ax25_digi *); +void ax25_digi_invert(const ax25_digi *, ax25_digi *); /* ax25_dev.c */ extern ax25_dev *ax25_dev_list; @@ -305,33 +307,33 @@ static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev) return dev->ax25_ptr; } -extern ax25_dev *ax25_addr_ax25dev(ax25_address *); -extern void ax25_dev_device_up(struct net_device *); -extern void ax25_dev_device_down(struct net_device *); -extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *); -extern struct net_device *ax25_fwd_dev(struct net_device *); -extern void ax25_dev_free(void); +ax25_dev *ax25_addr_ax25dev(ax25_address *); +void ax25_dev_device_up(struct net_device *); +void ax25_dev_device_down(struct net_device *); +int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *); +struct net_device *ax25_fwd_dev(struct net_device *); +void ax25_dev_free(void); /* ax25_ds_in.c */ -extern int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int); +int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int); /* ax25_ds_subr.c */ -extern void ax25_ds_nr_error_recovery(ax25_cb *); -extern void ax25_ds_enquiry_response(ax25_cb *); -extern void ax25_ds_establish_data_link(ax25_cb *); -extern void ax25_dev_dama_off(ax25_dev *); -extern void ax25_dama_on(ax25_cb *); -extern void ax25_dama_off(ax25_cb *); +void ax25_ds_nr_error_recovery(ax25_cb *); +void ax25_ds_enquiry_response(ax25_cb *); +void ax25_ds_establish_data_link(ax25_cb *); +void ax25_dev_dama_off(ax25_dev *); +void ax25_dama_on(ax25_cb *); +void ax25_dama_off(ax25_cb *); /* ax25_ds_timer.c */ -extern void ax25_ds_setup_timer(ax25_dev *); -extern void ax25_ds_set_timer(ax25_dev *); -extern void ax25_ds_del_timer(ax25_dev *); -extern void ax25_ds_timer(ax25_cb *); -extern void ax25_ds_t1_timeout(ax25_cb *); -extern void ax25_ds_heartbeat_expiry(ax25_cb *); -extern void ax25_ds_t3timer_expiry(ax25_cb *); -extern void ax25_ds_idletimer_expiry(ax25_cb *); +void ax25_ds_setup_timer(ax25_dev *); +void ax25_ds_set_timer(ax25_dev *); +void ax25_ds_del_timer(ax25_dev *); +void ax25_ds_timer(ax25_cb *); +void ax25_ds_t1_timeout(ax25_cb *); +void ax25_ds_heartbeat_expiry(ax25_cb *); +void 
ax25_ds_t3timer_expiry(ax25_cb *); +void ax25_ds_idletimer_expiry(ax25_cb *); /* ax25_iface.c */ @@ -341,110 +343,112 @@ struct ax25_protocol { int (*func)(struct sk_buff *, ax25_cb *); }; -extern void ax25_register_pid(struct ax25_protocol *ap); -extern void ax25_protocol_release(unsigned int); +void ax25_register_pid(struct ax25_protocol *ap); +void ax25_protocol_release(unsigned int); struct ax25_linkfail { struct hlist_node lf_node; void (*func)(ax25_cb *, int); }; -extern void ax25_linkfail_register(struct ax25_linkfail *lf); -extern void ax25_linkfail_release(struct ax25_linkfail *lf); -extern int __must_check ax25_listen_register(ax25_address *, - struct net_device *); -extern void ax25_listen_release(ax25_address *, struct net_device *); -extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *); -extern int ax25_listen_mine(ax25_address *, struct net_device *); -extern void ax25_link_failed(ax25_cb *, int); -extern int ax25_protocol_is_registered(unsigned int); +void ax25_linkfail_register(struct ax25_linkfail *lf); +void ax25_linkfail_release(struct ax25_linkfail *lf); +int __must_check ax25_listen_register(ax25_address *, struct net_device *); +void ax25_listen_release(ax25_address *, struct net_device *); +int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *); +int ax25_listen_mine(ax25_address *, struct net_device *); +void ax25_link_failed(ax25_cb *, int); +int ax25_protocol_is_registered(unsigned int); /* ax25_in.c */ -extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *); -extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); +int ax25_rx_iframe(ax25_cb *, struct sk_buff *); +int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, + struct net_device *); /* ax25_ip.c */ -extern int ax25_hard_header(struct sk_buff *, struct net_device *, - unsigned short, const void *, - const void *, unsigned int); -extern int ax25_rebuild_header(struct sk_buff *); +int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short, + const void *, const void *, unsigned int); +int ax25_rebuild_header(struct sk_buff *); extern const struct header_ops ax25_header_ops; /* ax25_out.c */ -extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *); -extern void ax25_output(ax25_cb *, int, struct sk_buff *); -extern void ax25_kick(ax25_cb *); -extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int); -extern void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev); -extern int ax25_check_iframes_acked(ax25_cb *, unsigned short); +ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, + ax25_digi *, struct net_device *); +void ax25_output(ax25_cb *, int, struct sk_buff *); +void ax25_kick(ax25_cb *); +void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int); +void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev); +int ax25_check_iframes_acked(ax25_cb *, unsigned short); /* ax25_route.c */ -extern void ax25_rt_device_down(struct net_device *); -extern int ax25_rt_ioctl(unsigned int, void __user *); +void ax25_rt_device_down(struct net_device *); +int ax25_rt_ioctl(unsigned int, void __user *); extern const struct file_operations ax25_route_fops; -extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); -extern int ax25_rt_autobind(ax25_cb *, ax25_address *); -extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, 
ax25_address *, ax25_digi *); -extern void ax25_rt_free(void); +ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); +int ax25_rt_autobind(ax25_cb *, ax25_address *); +struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, + ax25_address *, ax25_digi *); +void ax25_rt_free(void); /* ax25_std_in.c */ -extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); +int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); /* ax25_std_subr.c */ -extern void ax25_std_nr_error_recovery(ax25_cb *); -extern void ax25_std_establish_data_link(ax25_cb *); -extern void ax25_std_transmit_enquiry(ax25_cb *); -extern void ax25_std_enquiry_response(ax25_cb *); -extern void ax25_std_timeout_response(ax25_cb *); +void ax25_std_nr_error_recovery(ax25_cb *); +void ax25_std_establish_data_link(ax25_cb *); +void ax25_std_transmit_enquiry(ax25_cb *); +void ax25_std_enquiry_response(ax25_cb *); +void ax25_std_timeout_response(ax25_cb *); /* ax25_std_timer.c */ -extern void ax25_std_heartbeat_expiry(ax25_cb *); -extern void ax25_std_t1timer_expiry(ax25_cb *); -extern void ax25_std_t2timer_expiry(ax25_cb *); -extern void ax25_std_t3timer_expiry(ax25_cb *); -extern void ax25_std_idletimer_expiry(ax25_cb *); +void ax25_std_heartbeat_expiry(ax25_cb *); +void ax25_std_t1timer_expiry(ax25_cb *); +void ax25_std_t2timer_expiry(ax25_cb *); +void ax25_std_t3timer_expiry(ax25_cb *); +void ax25_std_idletimer_expiry(ax25_cb *); /* ax25_subr.c */ -extern void ax25_clear_queues(ax25_cb *); -extern void ax25_frames_acked(ax25_cb *, unsigned short); -extern void ax25_requeue_frames(ax25_cb *); -extern int ax25_validate_nr(ax25_cb *, unsigned short); -extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *); -extern void ax25_send_control(ax25_cb *, int, int, int); -extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *); -extern void ax25_calculate_t1(ax25_cb *); -extern void ax25_calculate_rtt(ax25_cb *); -extern void ax25_disconnect(ax25_cb *, int); +void ax25_clear_queues(ax25_cb *); +void ax25_frames_acked(ax25_cb *, unsigned short); +void ax25_requeue_frames(ax25_cb *); +int ax25_validate_nr(ax25_cb *, unsigned short); +int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *); +void ax25_send_control(ax25_cb *, int, int, int); +void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, + ax25_digi *); +void ax25_calculate_t1(ax25_cb *); +void ax25_calculate_rtt(ax25_cb *); +void ax25_disconnect(ax25_cb *, int); /* ax25_timer.c */ -extern void ax25_setup_timers(ax25_cb *); -extern void ax25_start_heartbeat(ax25_cb *); -extern void ax25_start_t1timer(ax25_cb *); -extern void ax25_start_t2timer(ax25_cb *); -extern void ax25_start_t3timer(ax25_cb *); -extern void ax25_start_idletimer(ax25_cb *); -extern void ax25_stop_heartbeat(ax25_cb *); -extern void ax25_stop_t1timer(ax25_cb *); -extern void ax25_stop_t2timer(ax25_cb *); -extern void ax25_stop_t3timer(ax25_cb *); -extern void ax25_stop_idletimer(ax25_cb *); -extern int ax25_t1timer_running(ax25_cb *); -extern unsigned long ax25_display_timer(struct timer_list *); +void ax25_setup_timers(ax25_cb *); +void ax25_start_heartbeat(ax25_cb *); +void ax25_start_t1timer(ax25_cb *); +void ax25_start_t2timer(ax25_cb *); +void ax25_start_t3timer(ax25_cb *); +void ax25_start_idletimer(ax25_cb *); +void ax25_stop_heartbeat(ax25_cb *); +void ax25_stop_t1timer(ax25_cb *); +void ax25_stop_t2timer(ax25_cb *); +void ax25_stop_t3timer(ax25_cb *); +void ax25_stop_idletimer(ax25_cb *); +int 
ax25_t1timer_running(ax25_cb *); +unsigned long ax25_display_timer(struct timer_list *); /* ax25_uid.c */ extern int ax25_uid_policy; -extern ax25_uid_assoc *ax25_findbyuid(uid_t); -extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); +ax25_uid_assoc *ax25_findbyuid(kuid_t); +int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); extern const struct file_operations ax25_uid_fops; -extern void ax25_uid_free(void); +void ax25_uid_free(void); /* sysctl_net_ax25.c */ #ifdef CONFIG_SYSCTL -extern void ax25_register_sysctl(void); -extern void ax25_unregister_sysctl(void); +int ax25_register_dev_sysctl(ax25_dev *ax25_dev); +void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev); #else -static inline void ax25_register_sysctl(void) {}; -static inline void ax25_unregister_sysctl(void) {}; +static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; } +static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {} #endif /* CONFIG_SYSCTL */ #endif diff --git a/include/net/ax88796.h b/include/net/ax88796.h index 51329dae44e..b9a3beca0ce 100644 --- a/include/net/ax88796.h +++ b/include/net/ax88796.h @@ -15,14 +15,17 @@ #define AXFLG_HAS_EEPROM (1<<0) #define AXFLG_MAC_FROMDEV (1<<1) /* device already has MAC */ #define AXFLG_HAS_93CX6 (1<<2) /* use eeprom_93cx6 driver */ +#define AXFLG_MAC_FROMPLATFORM (1<<3) /* MAC given by platform data */ struct ax_plat_data { unsigned int flags; - unsigned char wordlength; /* 1 or 2 */ - unsigned char dcr_val; /* default value for DCR */ - unsigned char rcr_val; /* default value for RCR */ - unsigned char gpoc_val; /* default value for GPOC */ - u32 *reg_offsets; /* register offsets */ + unsigned char wordlength; /* 1 or 2 */ + unsigned char dcr_val; /* default value for DCR */ + unsigned char rcr_val; /* default value for RCR */ + unsigned char gpoc_val; /* default value for GPOC */ + u32 *reg_offsets; /* register offsets */ + u8 *mac_addr; /* MAC addr (only used when + AXFLG_MAC_FROMPLATFORM is used */ }; #endif /* __NET_AX88796_PLAT_H */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 750648df13f..904777c1cd2 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -1,4 +1,4 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated @@ -12,30 +12,33 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #ifndef __BLUETOOTH_H #define __BLUETOOTH_H -#include <asm/types.h> -#include <asm/byteorder.h> -#include <linux/list.h> #include <linux/poll.h> #include <net/sock.h> +#include <linux/seq_file.h> #ifndef AF_BLUETOOTH #define AF_BLUETOOTH 31 #define PF_BLUETOOTH AF_BLUETOOTH #endif +/* Bluetooth versions */ +#define BLUETOOTH_VER_1_1 1 +#define BLUETOOTH_VER_1_2 2 +#define BLUETOOTH_VER_2_0 3 + /* Reserv for core and drivers use */ #define BT_SKB_RESERVE 8 @@ -53,9 +56,77 @@ #define SOL_SCO 17 #define SOL_RFCOMM 18 -#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg) -#define BT_DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __FUNCTION__ , ## arg) -#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __FUNCTION__ , ## arg) +#define BT_SECURITY 4 +struct bt_security { + __u8 level; + __u8 key_size; +}; +#define BT_SECURITY_SDP 0 +#define BT_SECURITY_LOW 1 +#define BT_SECURITY_MEDIUM 2 +#define BT_SECURITY_HIGH 3 +#define BT_SECURITY_FIPS 4 + +#define BT_DEFER_SETUP 7 + +#define BT_FLUSHABLE 8 + +#define BT_FLUSHABLE_OFF 0 +#define BT_FLUSHABLE_ON 1 + +#define BT_POWER 9 +struct bt_power { + __u8 force_active; +}; +#define BT_POWER_FORCE_ACTIVE_OFF 0 +#define BT_POWER_FORCE_ACTIVE_ON 1 + +#define BT_CHANNEL_POLICY 10 + +/* BR/EDR only (default policy) + * AMP controllers cannot be used. + * Channel move requests from the remote device are denied. + * If the L2CAP channel is currently using AMP, move the channel to BR/EDR. + */ +#define BT_CHANNEL_POLICY_BREDR_ONLY 0 + +/* BR/EDR Preferred + * Allow use of AMP controllers. + * If the L2CAP channel is currently on AMP, move it to BR/EDR. + * Channel move requests from the remote device are allowed. + */ +#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1 + +/* AMP Preferred + * Allow use of AMP controllers + * If the L2CAP channel is currently on BR/EDR and AMP controller + * resources are available, initiate a channel move to AMP. + * Channel move requests from the remote device are allowed. + * If the L2CAP socket has not been connected yet, try to create + * and configure the channel directly on an AMP controller rather + * than BR/EDR. + */ +#define BT_CHANNEL_POLICY_AMP_PREFERRED 2 + +#define BT_VOICE 11 +struct bt_voice { + __u16 setting; +}; + +#define BT_VOICE_TRANSPARENT 0x0003 +#define BT_VOICE_CVSD_16BIT 0x0060 + +#define BT_SNDMTU 12 +#define BT_RCVMTU 13 + +__printf(1, 2) +int bt_info(const char *fmt, ...); +__printf(1, 2) +int bt_err(const char *fmt, ...); + +#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) +#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__) +#define BT_DBG(fmt, ...) 
pr_debug(fmt "\n", ##__VA_ARGS__) /* Connection and socket states */ enum { @@ -70,33 +141,80 @@ enum { BT_CLOSED }; -/* Endianness conversions */ -#define htobs(a) __cpu_to_le16(a) -#define htobl(a) __cpu_to_le32(a) -#define btohs(a) __le16_to_cpu(a) -#define btohl(a) __le32_to_cpu(a) +/* If unused will be removed by compiler */ +static inline const char *state_to_string(int state) +{ + switch (state) { + case BT_CONNECTED: + return "BT_CONNECTED"; + case BT_OPEN: + return "BT_OPEN"; + case BT_BOUND: + return "BT_BOUND"; + case BT_LISTEN: + return "BT_LISTEN"; + case BT_CONNECT: + return "BT_CONNECT"; + case BT_CONNECT2: + return "BT_CONNECT2"; + case BT_CONFIG: + return "BT_CONFIG"; + case BT_DISCONN: + return "BT_DISCONN"; + case BT_CLOSED: + return "BT_CLOSED"; + } + + return "invalid state"; +} /* BD Address */ typedef struct { __u8 b[6]; -} __attribute__((packed)) bdaddr_t; +} __packed bdaddr_t; + +/* BD Address type */ +#define BDADDR_BREDR 0x00 +#define BDADDR_LE_PUBLIC 0x01 +#define BDADDR_LE_RANDOM 0x02 + +static inline bool bdaddr_type_is_valid(__u8 type) +{ + switch (type) { + case BDADDR_BREDR: + case BDADDR_LE_PUBLIC: + case BDADDR_LE_RANDOM: + return true; + } + + return false; +} + +static inline bool bdaddr_type_is_le(__u8 type) +{ + switch (type) { + case BDADDR_LE_PUBLIC: + case BDADDR_LE_RANDOM: + return true; + } + + return false; +} -#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) -#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) +#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) +#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}) /* Copy, swap, convert BD Address */ -static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2) +static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2) { return memcmp(ba1, ba2, sizeof(bdaddr_t)); } -static inline void bacpy(bdaddr_t *dst, bdaddr_t *src) +static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src) { memcpy(dst, src, sizeof(bdaddr_t)); } void baswap(bdaddr_t *dst, bdaddr_t *src); -char *batostr(bdaddr_t *ba); -bdaddr_t *strtoba(char *str); /* Common socket structures and functions */ @@ -104,77 +222,141 @@ bdaddr_t *strtoba(char *str); struct bt_sock { struct sock sk; - bdaddr_t src; - bdaddr_t dst; struct list_head accept_q; struct sock *parent; + unsigned long flags; + void (*skb_msg_name)(struct sk_buff *, void *, int *); +}; + +enum { + BT_SK_DEFER_SETUP, + BT_SK_SUSPEND, }; struct bt_sock_list { struct hlist_head head; rwlock_t lock; +#ifdef CONFIG_PROC_FS + int (* custom_seq_show)(struct seq_file *, void *); +#endif }; -int bt_sock_register(int proto, struct net_proto_family *ops); -int bt_sock_unregister(int proto); +int bt_sock_register(int proto, const struct net_proto_family *ops); +void bt_sock_unregister(int proto); void bt_sock_link(struct bt_sock_list *l, struct sock *s); void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); -int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); -uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); +int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags); +int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags); +uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); +int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); 
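A minimal usage sketch for the bdaddr_t helpers declared above, assuming only bacmp(), bacpy() and BDADDR_ANY from this header; the two example_* functions are hypothetical and are not part of this diff.

#include <net/bluetooth/bluetooth.h>

/* Sketch only: bacmp() is a memcmp() over the six address bytes, so
 * comparing against BDADDR_ANY tests for the all-zero wildcard address.
 */
static inline bool example_addr_is_wildcard(const bdaddr_t *ba)
{
	return bacmp(ba, BDADDR_ANY) == 0;
}

/* Sketch only: bacpy() is a plain six-byte copy of a device address. */
static inline void example_copy_peer_addr(bdaddr_t *dst, const bdaddr_t *src)
{
	if (!example_addr_is_wildcard(src))
		bacpy(dst, src);
}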
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags); void bt_accept_enqueue(struct sock *parent, struct sock *sk); void bt_accept_unlink(struct sock *sk); struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock); /* Skb helpers */ +struct l2cap_ctrl { + unsigned int sframe:1, + poll:1, + final:1, + fcs:1, + sar:2, + super:2; + __u16 reqseq; + __u16 txseq; + __u8 retries; +}; + +struct hci_dev; + +typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status); + +struct hci_req_ctrl { + bool start; + u8 event; + hci_req_complete_t complete; +}; + struct bt_skb_cb { __u8 pkt_type; __u8 incoming; + __u16 expect; + __u8 force_active; + struct l2cap_chan *chan; + struct l2cap_ctrl control; + struct hci_req_ctrl req; + bdaddr_t bdaddr; + __le16 psm; }; -#define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb)) +#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) { struct sk_buff *skb; - if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) { + skb = alloc_skb(len + BT_SKB_RESERVE, how); + if (skb) { skb_reserve(skb, BT_SKB_RESERVE); bt_cb(skb)->incoming = 0; } return skb; } -static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len, - int nb, int *err) +static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, + unsigned long len, int nb, int *err) { struct sk_buff *skb; - if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { + skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err); + if (skb) { skb_reserve(skb, BT_SKB_RESERVE); bt_cb(skb)->incoming = 0; } + if (!skb && *err) + return NULL; + + *err = sock_error(sk); + if (*err) + goto out; + + if (sk->sk_shutdown) { + *err = -ECONNRESET; + goto out; + } + return skb; + +out: + kfree_skb(skb); + return NULL; } -static inline int skb_frags_no(struct sk_buff *skb) -{ - register struct sk_buff *frag = skb_shinfo(skb)->frag_list; - register int n = 1; +int bt_to_errno(__u16 code); - for (; frag; frag=frag->next, n++); - return n; -} +int hci_sock_init(void); +void hci_sock_cleanup(void); + +int bt_sysfs_init(void); +void bt_sysfs_cleanup(void); + +int bt_procfs_init(struct net *net, const char *name, + struct bt_sock_list *sk_list, + int (*seq_show)(struct seq_file *, void *)); +void bt_procfs_cleanup(struct net *net, const char *name); -int bt_err(__u16 code); +extern struct dentry *bt_debugfs; -extern int hci_sock_init(void); -extern void hci_sock_cleanup(void); +int l2cap_init(void); +void l2cap_exit(void); -extern int bt_sysfs_init(void); -extern void bt_sysfs_cleanup(void); +int sco_init(void); +void sco_exit(void); -extern struct class *bt_class; +void bt_sock_reclassify_lock(struct sock *sk, int proto); #endif /* __BLUETOOTH_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index a8a9eb6af96..16587dcd6a9 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1,4 +1,4 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated @@ -12,13 +12,13 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ @@ -30,6 +30,13 @@ #define HCI_MAX_EVENT_SIZE 260 #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) +#define HCI_LINK_KEY_SIZE 16 +#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE) + +#define HCI_MAX_AMP_ASSOC_SIZE 672 + +#define HCI_MAX_CSB_DATA_SIZE 252 + /* HCI dev events */ #define HCI_DEV_REG 1 #define HCI_DEV_UNREG 2 @@ -43,7 +50,7 @@ #define HCI_NOTIFY_CONN_DEL 2 #define HCI_NOTIFY_VOICE_SETTING 3 -/* HCI device types */ +/* HCI bus types */ #define HCI_VIRTUAL 0 #define HCI_USB 1 #define HCI_PCCARD 2 @@ -52,11 +59,32 @@ #define HCI_PCI 5 #define HCI_SDIO 6 +/* HCI controller types */ +#define HCI_BREDR 0x00 +#define HCI_AMP 0x01 + +/* First BR/EDR Controller shall have ID = 0 */ +#define AMP_ID_BREDR 0x00 + +/* AMP controller types */ +#define AMP_TYPE_BREDR 0x00 +#define AMP_TYPE_80211 0x01 + +/* AMP controller status */ +#define AMP_STATUS_POWERED_DOWN 0x00 +#define AMP_STATUS_BLUETOOTH_ONLY 0x01 +#define AMP_STATUS_NO_CAPACITY 0x02 +#define AMP_STATUS_LOW_CAPACITY 0x03 +#define AMP_STATUS_MEDIUM_CAPACITY 0x04 +#define AMP_STATUS_HIGH_CAPACITY 0x05 +#define AMP_STATUS_FULL_CAPACITY 0x06 + /* HCI device quirks */ enum { - HCI_QUIRK_RESET_ON_INIT, + HCI_QUIRK_RESET_ON_CLOSE, HCI_QUIRK_RAW_DEVICE, - HCI_QUIRK_FIXUP_BUFFER_SIZE + HCI_QUIRK_FIXUP_BUFFER_SIZE, + HCI_QUIRK_BROKEN_STORED_LINK_KEY, }; /* HCI device flags */ @@ -73,9 +101,54 @@ enum { HCI_RAW, - HCI_SECMGR + HCI_RESET, +}; + +/* + * BR/EDR and/or LE controller flags: the flags defined here should represent + * states from the controller. + */ +enum { + HCI_SETUP, + HCI_AUTO_OFF, + HCI_RFKILLED, + HCI_MGMT, + HCI_PAIRABLE, + HCI_SERVICE_CACHE, + HCI_DEBUG_KEYS, + HCI_DUT_MODE, + HCI_FORCE_SC, + HCI_FORCE_STATIC_ADDR, + HCI_UNREGISTER, + HCI_USER_CHANNEL, + + HCI_LE_SCAN, + HCI_SSP_ENABLED, + HCI_SC_ENABLED, + HCI_SC_ONLY, + HCI_PRIVACY, + HCI_RPA_EXPIRED, + HCI_RPA_RESOLVING, + HCI_HS_ENABLED, + HCI_LE_ENABLED, + HCI_ADVERTISING, + HCI_CONNECTABLE, + HCI_DISCOVERABLE, + HCI_LIMITED_DISCOVERABLE, + HCI_LINK_SECURITY, + HCI_PERIODIC_INQ, + HCI_FAST_CONNECTABLE, + HCI_BREDR_ENABLED, + HCI_6LOWPAN_ENABLED, + HCI_LE_SCAN_INTERRUPTED, }; +/* A mask for the flags that are supposed to remain when a reset happens + * or the HCI device is closed. 
+ */ +#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \ + BIT(HCI_FAST_CONNECTABLE)) + /* HCI ioctl defines */ #define HCIDEVUP _IOW('H', 201, int) #define HCIDEVDOWN _IOW('H', 202, int) @@ -86,6 +159,7 @@ enum { #define HCIGETDEVINFO _IOR('H', 211, int) #define HCIGETCONNLIST _IOR('H', 212, int) #define HCIGETCONNINFO _IOR('H', 213, int) +#define HCIGETAUTHINFO _IOR('H', 215, int) #define HCISETRAW _IOW('H', 220, int) #define HCISETSCAN _IOW('H', 221, int) @@ -97,15 +171,20 @@ enum { #define HCISETACLMTU _IOW('H', 227, int) #define HCISETSCOMTU _IOW('H', 228, int) -#define HCISETSECMGR _IOW('H', 230, int) +#define HCIBLOCKADDR _IOW('H', 230, int) +#define HCIUNBLOCKADDR _IOW('H', 231, int) #define HCIINQUIRY _IOR('H', 240, int) /* HCI timeouts */ -#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ -#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ -#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ -#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ +#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ +#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */ +#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */ +#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ +#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */ +#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ +#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */ +#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ /* HCI data types */ #define HCI_COMMAND_PKT 0x01 @@ -136,10 +215,19 @@ enum { #define ESCO_EV3 0x0008 #define ESCO_EV4 0x0010 #define ESCO_EV5 0x0020 +#define ESCO_2EV3 0x0040 +#define ESCO_3EV3 0x0080 +#define ESCO_2EV5 0x0100 +#define ESCO_3EV5 0x0200 + +#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) +#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) /* ACL flags */ +#define ACL_START_NO_FLUSH 0x00 #define ACL_CONT 0x01 #define ACL_START 0x02 +#define ACL_COMPLETE 0x03 #define ACL_ACTIVE_BCAST 0x04 #define ACL_PICO_BCAST 0x08 @@ -147,6 +235,9 @@ enum { #define SCO_LINK 0x00 #define ACL_LINK 0x01 #define ESCO_LINK 0x02 +/* Low Energy links do not have defined link type. 
Use invented one */ +#define LE_LINK 0x80 +#define AMP_LINK 0x81 /* LMP features */ #define LMP_3SLOT 0x01 @@ -170,13 +261,45 @@ enum { #define LMP_CVSD 0x01 #define LMP_PSCHEME 0x02 #define LMP_PCONTROL 0x04 +#define LMP_TRANSPARENT 0x08 +#define LMP_RSSI_INQ 0x40 #define LMP_ESCO 0x80 #define LMP_EV4 0x01 #define LMP_EV5 0x02 +#define LMP_NO_BREDR 0x20 +#define LMP_LE 0x40 #define LMP_SNIFF_SUBR 0x02 +#define LMP_PAUSE_ENC 0x04 +#define LMP_EDR_ESCO_2M 0x20 +#define LMP_EDR_ESCO_3M 0x40 +#define LMP_EDR_3S_ESCO 0x80 + +#define LMP_EXT_INQ 0x01 +#define LMP_SIMUL_LE_BR 0x02 +#define LMP_SIMPLE_PAIR 0x08 +#define LMP_NO_FLUSH 0x40 + +#define LMP_LSTO 0x01 +#define LMP_INQ_TX_PWR 0x02 +#define LMP_EXTFEATURES 0x80 + +/* Extended LMP features */ +#define LMP_CSB_MASTER 0x01 +#define LMP_CSB_SLAVE 0x02 +#define LMP_SYNC_TRAIN 0x04 +#define LMP_SYNC_SCAN 0x08 + +#define LMP_SC 0x01 +#define LMP_PING 0x02 + +/* Host features */ +#define LMP_HOST_SSP 0x01 +#define LMP_HOST_LE 0x02 +#define LMP_HOST_LE_BREDR 0x04 +#define LMP_HOST_SC 0x08 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -198,17 +321,98 @@ enum { #define HCI_LM_TRUSTED 0x0008 #define HCI_LM_RELIABLE 0x0010 #define HCI_LM_SECURE 0x0020 +#define HCI_LM_FIPS 0x0040 + +/* Authentication types */ +#define HCI_AT_NO_BONDING 0x00 +#define HCI_AT_NO_BONDING_MITM 0x01 +#define HCI_AT_DEDICATED_BONDING 0x02 +#define HCI_AT_DEDICATED_BONDING_MITM 0x03 +#define HCI_AT_GENERAL_BONDING 0x04 +#define HCI_AT_GENERAL_BONDING_MITM 0x05 + +/* I/O capabilities */ +#define HCI_IO_DISPLAY_ONLY 0x00 +#define HCI_IO_DISPLAY_YESNO 0x01 +#define HCI_IO_KEYBOARD_ONLY 0x02 +#define HCI_IO_NO_INPUT_OUTPUT 0x03 + +/* Link Key types */ +#define HCI_LK_COMBINATION 0x00 +#define HCI_LK_LOCAL_UNIT 0x01 +#define HCI_LK_REMOTE_UNIT 0x02 +#define HCI_LK_DEBUG_COMBINATION 0x03 +#define HCI_LK_UNAUTH_COMBINATION_P192 0x04 +#define HCI_LK_AUTH_COMBINATION_P192 0x05 +#define HCI_LK_CHANGED_COMBINATION 0x06 +#define HCI_LK_UNAUTH_COMBINATION_P256 0x07 +#define HCI_LK_AUTH_COMBINATION_P256 0x08 +/* The spec doesn't define types for SMP keys, the _MASTER suffix is implied */ +#define HCI_SMP_STK 0x80 +#define HCI_SMP_STK_SLAVE 0x81 +#define HCI_SMP_LTK 0x82 +#define HCI_SMP_LTK_SLAVE 0x83 + +/* Long Term Key types */ +#define HCI_LTK_UNAUTH 0x00 +#define HCI_LTK_AUTH 0x01 + +/* ---- HCI Error Codes ---- */ +#define HCI_ERROR_AUTH_FAILURE 0x05 +#define HCI_ERROR_MEMORY_EXCEEDED 0x07 +#define HCI_ERROR_CONNECTION_TIMEOUT 0x08 +#define HCI_ERROR_REJ_BAD_ADDR 0x0f +#define HCI_ERROR_REMOTE_USER_TERM 0x13 +#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14 +#define HCI_ERROR_REMOTE_POWER_OFF 0x15 +#define HCI_ERROR_LOCAL_HOST_TERM 0x16 +#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 +#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c + +/* Flow control modes */ +#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00 +#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01 + +/* The core spec defines 127 as the "not available" value */ +#define HCI_TX_POWER_INVALID 127 + +/* Extended Inquiry Response field types */ +#define EIR_FLAGS 0x01 /* flags */ +#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ +#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ +#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ +#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ +#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ +#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ +#define EIR_NAME_SHORT 0x08 /* shortened local name */ +#define EIR_NAME_COMPLETE 0x09 /* complete 
local name */ +#define EIR_TX_POWER 0x0A /* transmit power level */ +#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */ +#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */ +#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */ +#define EIR_DEVICE_ID 0x10 /* device ID */ + +/* Low Energy Advertising Flags */ +#define LE_AD_LIMITED 0x01 /* Limited Discoverable */ +#define LE_AD_GENERAL 0x02 /* General Discoverable */ +#define LE_AD_NO_BREDR 0x04 /* BR/EDR not supported */ +#define LE_AD_SIM_LE_BREDR_CTRL 0x08 /* Simultaneous LE & BR/EDR Controller */ +#define LE_AD_SIM_LE_BREDR_HOST 0x10 /* Simultaneous LE & BR/EDR Host */ /* ----- HCI Commands ---- */ +#define HCI_OP_NOP 0x0000 + #define HCI_OP_INQUIRY 0x0401 struct hci_cp_inquiry { __u8 lap[3]; __u8 length; __u8 num_rsp; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_INQUIRY_CANCEL 0x0402 +#define HCI_OP_PERIODIC_INQ 0x0403 + #define HCI_OP_EXIT_PERIODIC_INQ 0x0404 #define HCI_OP_CREATE_CONN 0x0405 @@ -219,81 +423,89 @@ struct hci_cp_create_conn { __u8 pscan_mode; __le16 clock_offset; __u8 role_switch; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_DISCONNECT 0x0406 struct hci_cp_disconnect { __le16 handle; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ADD_SCO 0x0407 struct hci_cp_add_sco { __le16 handle; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_CREATE_CONN_CANCEL 0x0408 struct hci_cp_create_conn_cancel { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ACCEPT_CONN_REQ 0x0409 struct hci_cp_accept_conn_req { bdaddr_t bdaddr; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REJECT_CONN_REQ 0x040a struct hci_cp_reject_conn_req { bdaddr_t bdaddr; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_LINK_KEY_REPLY 0x040b struct hci_cp_link_key_reply { bdaddr_t bdaddr; - __u8 link_key[16]; -} __attribute__ ((packed)); + __u8 link_key[HCI_LINK_KEY_SIZE]; +} __packed; #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c struct hci_cp_link_key_neg_reply { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_PIN_CODE_REPLY 0x040d struct hci_cp_pin_code_reply { bdaddr_t bdaddr; __u8 pin_len; __u8 pin_code[16]; -} __attribute__ ((packed)); +} __packed; +struct hci_rp_pin_code_reply { + __u8 status; + bdaddr_t bdaddr; +} __packed; #define HCI_OP_PIN_CODE_NEG_REPLY 0x040e struct hci_cp_pin_code_neg_reply { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; +struct hci_rp_pin_code_neg_reply { + __u8 status; + bdaddr_t bdaddr; +} __packed; #define HCI_OP_CHANGE_CONN_PTYPE 0x040f struct hci_cp_change_conn_ptype { __le16 handle; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_AUTH_REQUESTED 0x0411 struct hci_cp_auth_requested { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SET_CONN_ENCRYPT 0x0413 struct hci_cp_set_conn_encrypt { __le16 handle; __u8 encrypt; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415 struct hci_cp_change_conn_link_key { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REMOTE_NAME_REQ 0x0419 struct hci_cp_remote_name_req { @@ -301,28 +513,28 @@ struct hci_cp_remote_name_req { __u8 pscan_rep_mode; __u8 pscan_mode; __le16 clock_offset; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a struct hci_cp_remote_name_req_cancel { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_REMOTE_FEATURES 0x041b 
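/* Editor's note (illustrative sketch, not part of the patch): the command
 * parameter structures defined above are declared __packed and use explicit
 * little-endian types (__le16/__le32) so they can be copied verbatim into an
 * HCI command payload. A caller typically fills the structure, converts
 * multi-byte fields with cpu_to_le16()/cpu_to_le32(), and queues it by
 * opcode. The sketch below assumes the conventional
 * hci_send_cmd(hdev, opcode, plen, param) helper; both that assumption and
 * the function name example_disconnect() are hypothetical, shown only to
 * illustrate how hci_cp_disconnect and HCI_OP_DISCONNECT fit together.
 */
static int example_disconnect(struct hci_dev *hdev, __u16 handle, __u8 reason)
{
	struct hci_cp_disconnect cp;

	cp.handle = cpu_to_le16(handle);	/* handles are little endian on the wire */
	cp.reason = reason;			/* e.g. HCI_ERROR_REMOTE_USER_TERM (0x13) */

	return hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}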
struct hci_cp_read_remote_features { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c struct hci_cp_read_remote_ext_features { __le16 handle; __u8 page; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_REMOTE_VERSION 0x041d struct hci_cp_read_remote_version { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SETUP_SYNC_CONN 0x0428 struct hci_cp_setup_sync_conn { @@ -333,7 +545,7 @@ struct hci_cp_setup_sync_conn { __le16 voice_setting; __u8 retrans_effort; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429 struct hci_cp_accept_sync_conn_req { @@ -344,13 +556,141 @@ struct hci_cp_accept_sync_conn_req { __le16 content_format; __u8 retrans_effort; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a struct hci_cp_reject_sync_conn_req { bdaddr_t bdaddr; __u8 reason; -} __attribute__ ((packed)); +} __packed; + +#define HCI_OP_IO_CAPABILITY_REPLY 0x042b +struct hci_cp_io_capability_reply { + bdaddr_t bdaddr; + __u8 capability; + __u8 oob_data; + __u8 authentication; +} __packed; + +#define HCI_OP_USER_CONFIRM_REPLY 0x042c +struct hci_cp_user_confirm_reply { + bdaddr_t bdaddr; +} __packed; +struct hci_rp_user_confirm_reply { + __u8 status; + bdaddr_t bdaddr; +} __packed; + +#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d + +#define HCI_OP_USER_PASSKEY_REPLY 0x042e +struct hci_cp_user_passkey_reply { + bdaddr_t bdaddr; + __le32 passkey; +} __packed; + +#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f + +#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430 +struct hci_cp_remote_oob_data_reply { + bdaddr_t bdaddr; + __u8 hash[16]; + __u8 randomizer[16]; +} __packed; + +#define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433 +struct hci_cp_remote_oob_data_neg_reply { + bdaddr_t bdaddr; +} __packed; + +#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434 +struct hci_cp_io_capability_neg_reply { + bdaddr_t bdaddr; + __u8 reason; +} __packed; + +#define HCI_OP_CREATE_PHY_LINK 0x0435 +struct hci_cp_create_phy_link { + __u8 phy_handle; + __u8 key_len; + __u8 key_type; + __u8 key[HCI_AMP_LINK_KEY_SIZE]; +} __packed; + +#define HCI_OP_ACCEPT_PHY_LINK 0x0436 +struct hci_cp_accept_phy_link { + __u8 phy_handle; + __u8 key_len; + __u8 key_type; + __u8 key[HCI_AMP_LINK_KEY_SIZE]; +} __packed; + +#define HCI_OP_DISCONN_PHY_LINK 0x0437 +struct hci_cp_disconn_phy_link { + __u8 phy_handle; + __u8 reason; +} __packed; + +struct ext_flow_spec { + __u8 id; + __u8 stype; + __le16 msdu; + __le32 sdu_itime; + __le32 acc_lat; + __le32 flush_to; +} __packed; + +#define HCI_OP_CREATE_LOGICAL_LINK 0x0438 +#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439 +struct hci_cp_create_accept_logical_link { + __u8 phy_handle; + struct ext_flow_spec tx_flow_spec; + struct ext_flow_spec rx_flow_spec; +} __packed; + +#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a +struct hci_cp_disconn_logical_link { + __le16 log_handle; +} __packed; + +#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b +struct hci_cp_logical_link_cancel { + __u8 phy_handle; + __u8 flow_spec_id; +} __packed; + +struct hci_rp_logical_link_cancel { + __u8 status; + __u8 phy_handle; + __u8 flow_spec_id; +} __packed; + +#define HCI_OP_SET_CSB 0x0441 +struct hci_cp_set_csb { + __u8 enable; + __u8 lt_addr; + __u8 lpo_allowed; + __le16 packet_type; + __le16 interval_min; + __le16 interval_max; + __le16 csb_sv_tout; +} __packed; +struct hci_rp_set_csb { + __u8 status; + __u8 lt_addr; + __le16 interval; +} __packed; + +#define 
HCI_OP_START_SYNC_TRAIN 0x0443 + +#define HCI_OP_REMOTE_OOB_EXT_DATA_REPLY 0x0445 +struct hci_cp_remote_oob_ext_data_reply { + bdaddr_t bdaddr; + __u8 hash192[16]; + __u8 randomizer192[16]; + __u8 hash256[16]; + __u8 randomizer256[16]; +} __packed; #define HCI_OP_SNIFF_MODE 0x0803 struct hci_cp_sniff_mode { @@ -359,48 +699,59 @@ struct hci_cp_sniff_mode { __le16 min_interval; __le16 attempt; __le16 timeout; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_EXIT_SNIFF_MODE 0x0804 struct hci_cp_exit_sniff_mode { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ROLE_DISCOVERY 0x0809 struct hci_cp_role_discovery { __le16 handle; -} __attribute__ ((packed)); +} __packed; struct hci_rp_role_discovery { __u8 status; __le16 handle; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SWITCH_ROLE 0x080b struct hci_cp_switch_role { bdaddr_t bdaddr; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LINK_POLICY 0x080c struct hci_cp_read_link_policy { __le16 handle; -} __attribute__ ((packed)); +} __packed; struct hci_rp_read_link_policy { __u8 status; __le16 handle; __le16 policy; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_LINK_POLICY 0x080d struct hci_cp_write_link_policy { __le16 handle; __le16 policy; -} __attribute__ ((packed)); +} __packed; struct hci_rp_write_link_policy { __u8 status; __le16 handle; -} __attribute__ ((packed)); +} __packed; + +#define HCI_OP_READ_DEF_LINK_POLICY 0x080e +struct hci_rp_read_def_link_policy { + __u8 status; + __le16 policy; +} __packed; + +#define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f +struct hci_cp_write_def_link_policy { + __le16 policy; +} __packed; #define HCI_OP_SNIFF_SUBRATE 0x0811 struct hci_cp_sniff_subrate { @@ -408,12 +759,9 @@ struct hci_cp_sniff_subrate { __le16 max_latency; __le16 min_remote_timeout; __le16 min_local_timeout; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SET_EVENT_MASK 0x0c01 -struct hci_cp_set_event_mask { - __u8 mask[8]; -} __attribute__ ((packed)); #define HCI_OP_RESET 0x0c03 @@ -422,7 +770,7 @@ struct hci_cp_set_event_flt { __u8 flt_type; __u8 cond_type; __u8 condition[0]; -} __attribute__ ((packed)); +} __packed; /* Filter types */ #define HCI_FLT_CLEAR_ALL 0x00 @@ -438,22 +786,30 @@ struct hci_cp_set_event_flt { #define HCI_CONN_SETUP_AUTO_OFF 0x01 #define HCI_CONN_SETUP_AUTO_ON 0x02 +#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12 +struct hci_cp_delete_stored_link_key { + bdaddr_t bdaddr; + __u8 delete_all; +} __packed; + +#define HCI_MAX_NAME_LENGTH 248 + #define HCI_OP_WRITE_LOCAL_NAME 0x0c13 struct hci_cp_write_local_name { - __u8 name[248]; -} __attribute__ ((packed)); + __u8 name[HCI_MAX_NAME_LENGTH]; +} __packed; #define HCI_OP_READ_LOCAL_NAME 0x0c14 struct hci_rp_read_local_name { __u8 status; - __u8 name[248]; -} __attribute__ ((packed)); + __u8 name[HCI_MAX_NAME_LENGTH]; +} __packed; #define HCI_OP_WRITE_CA_TIMEOUT 0x0c16 #define HCI_OP_WRITE_PG_TIMEOUT 0x0c18 -#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a +#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a #define SCAN_DISABLED 0x00 #define SCAN_INQUIRY 0x01 #define SCAN_PAGE 0x02 @@ -475,23 +831,23 @@ struct hci_rp_read_local_name { struct hci_rp_read_class_of_dev { __u8 status; __u8 dev_class[3]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24 struct hci_cp_write_class_of_dev { __u8 dev_class[3]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_VOICE_SETTING 0x0c25 struct hci_rp_read_voice_setting { __u8 status; __le16 voice_setting; -} 
__attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_VOICE_SETTING 0x0c26 struct hci_cp_write_voice_setting { __le16 voice_setting; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_HOST_BUFFER_SIZE 0x0c33 struct hci_cp_host_buffer_size { @@ -499,7 +855,135 @@ struct hci_cp_host_buffer_size { __u8 sco_mtu; __le16 acl_max_pkt; __le16 sco_max_pkt; -} __attribute__ ((packed)); +} __packed; + +#define HCI_OP_READ_NUM_SUPPORTED_IAC 0x0c38 +struct hci_rp_read_num_supported_iac { + __u8 status; + __u8 num_iac; +} __packed; + +#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39 + +#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a +struct hci_cp_write_current_iac_lap { + __u8 num_iac; + __u8 iac_lap[6]; +} __packed; + +#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45 + +#define HCI_MAX_EIR_LENGTH 240 + +#define HCI_OP_WRITE_EIR 0x0c52 +struct hci_cp_write_eir { + __u8 fec; + __u8 data[HCI_MAX_EIR_LENGTH]; +} __packed; + +#define HCI_OP_READ_SSP_MODE 0x0c55 +struct hci_rp_read_ssp_mode { + __u8 status; + __u8 mode; +} __packed; + +#define HCI_OP_WRITE_SSP_MODE 0x0c56 +struct hci_cp_write_ssp_mode { + __u8 mode; +} __packed; + +#define HCI_OP_READ_LOCAL_OOB_DATA 0x0c57 +struct hci_rp_read_local_oob_data { + __u8 status; + __u8 hash[16]; + __u8 randomizer[16]; +} __packed; + +#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 +struct hci_rp_read_inq_rsp_tx_power { + __u8 status; + __s8 tx_power; +} __packed; + +#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63 + +#define HCI_OP_READ_LOCATION_DATA 0x0c64 + +#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66 +struct hci_rp_read_flow_control_mode { + __u8 status; + __u8 mode; +} __packed; + +#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d +struct hci_cp_write_le_host_supported { + __u8 le; + __u8 simul; +} __packed; + +#define HCI_OP_SET_RESERVED_LT_ADDR 0x0c74 +struct hci_cp_set_reserved_lt_addr { + __u8 lt_addr; +} __packed; +struct hci_rp_set_reserved_lt_addr { + __u8 status; + __u8 lt_addr; +} __packed; + +#define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75 +struct hci_cp_delete_reserved_lt_addr { + __u8 lt_addr; +} __packed; +struct hci_rp_delete_reserved_lt_addr { + __u8 status; + __u8 lt_addr; +} __packed; + +#define HCI_OP_SET_CSB_DATA 0x0c76 +struct hci_cp_set_csb_data { + __u8 lt_addr; + __u8 fragment; + __u8 data_length; + __u8 data[HCI_MAX_CSB_DATA_SIZE]; +} __packed; +struct hci_rp_set_csb_data { + __u8 status; + __u8 lt_addr; +} __packed; + +#define HCI_OP_READ_SYNC_TRAIN_PARAMS 0x0c77 + +#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78 +struct hci_cp_write_sync_train_params { + __le16 interval_min; + __le16 interval_max; + __le32 sync_train_tout; + __u8 service_data; +} __packed; +struct hci_rp_write_sync_train_params { + __u8 status; + __le16 sync_train_int; +} __packed; + +#define HCI_OP_READ_SC_SUPPORT 0x0c79 +struct hci_rp_read_sc_support { + __u8 status; + __u8 support; +} __packed; + +#define HCI_OP_WRITE_SC_SUPPORT 0x0c7a +struct hci_cp_write_sc_support { + __u8 support; +} __packed; + +#define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d +struct hci_rp_read_local_oob_ext_data { + __u8 status; + __u8 hash192[16]; + __u8 randomizer192[16]; + __u8 hash256[16]; + __u8 randomizer256[16]; +} __packed; #define HCI_OP_READ_LOCAL_VERSION 0x1001 struct hci_rp_read_local_version { @@ -509,27 +993,30 @@ struct hci_rp_read_local_version { __u8 lmp_ver; __le16 manufacturer; __le16 lmp_subver; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_COMMANDS 0x1002 struct hci_rp_read_local_commands { __u8 status; __u8 commands[64]; -} __attribute__ ((packed)); +} __packed; #define 
HCI_OP_READ_LOCAL_FEATURES 0x1003 struct hci_rp_read_local_features { __u8 status; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004 +struct hci_cp_read_local_ext_features { + __u8 page; +} __packed; struct hci_rp_read_local_ext_features { __u8 status; __u8 page; __u8 max_page; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_BUFFER_SIZE 0x1005 struct hci_rp_read_buffer_size { @@ -538,13 +1025,271 @@ struct hci_rp_read_buffer_size { __u8 sco_mtu; __le16 acl_max_pkt; __le16 sco_max_pkt; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_BD_ADDR 0x1009 struct hci_rp_read_bd_addr { __u8 status; bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; + +#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a +struct hci_rp_read_data_block_size { + __u8 status; + __le16 max_acl_len; + __le16 block_len; + __le16 num_blocks; +} __packed; + +#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b +struct hci_rp_read_page_scan_activity { + __u8 status; + __le16 interval; + __le16 window; +} __packed; + +#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c +struct hci_cp_write_page_scan_activity { + __le16 interval; + __le16 window; +} __packed; + +#define HCI_OP_READ_TX_POWER 0x0c2d +struct hci_cp_read_tx_power { + __le16 handle; + __u8 type; +} __packed; +struct hci_rp_read_tx_power { + __u8 status; + __le16 handle; + __s8 tx_power; +} __packed; + +#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46 +struct hci_rp_read_page_scan_type { + __u8 status; + __u8 type; +} __packed; + +#define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47 + #define PAGE_SCAN_TYPE_STANDARD 0x00 + #define PAGE_SCAN_TYPE_INTERLACED 0x01 + +#define HCI_OP_READ_RSSI 0x1405 +struct hci_cp_read_rssi { + __le16 handle; +} __packed; +struct hci_rp_read_rssi { + __u8 status; + __le16 handle; + __s8 rssi; +} __packed; + +#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409 +struct hci_rp_read_local_amp_info { + __u8 status; + __u8 amp_status; + __le32 total_bw; + __le32 max_bw; + __le32 min_latency; + __le32 max_pdu; + __u8 amp_type; + __le16 pal_cap; + __le16 max_assoc_size; + __le32 max_flush_to; + __le32 be_flush_to; +} __packed; + +#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a +struct hci_cp_read_local_amp_assoc { + __u8 phy_handle; + __le16 len_so_far; + __le16 max_len; +} __packed; +struct hci_rp_read_local_amp_assoc { + __u8 status; + __u8 phy_handle; + __le16 rem_len; + __u8 frag[0]; +} __packed; + +#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b +struct hci_cp_write_remote_amp_assoc { + __u8 phy_handle; + __le16 len_so_far; + __le16 rem_len; + __u8 frag[0]; +} __packed; +struct hci_rp_write_remote_amp_assoc { + __u8 status; + __u8 phy_handle; +} __packed; + +#define HCI_OP_ENABLE_DUT_MODE 0x1803 + +#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804 + +#define HCI_OP_LE_SET_EVENT_MASK 0x2001 +struct hci_cp_le_set_event_mask { + __u8 mask[8]; +} __packed; + +#define HCI_OP_LE_READ_BUFFER_SIZE 0x2002 +struct hci_rp_le_read_buffer_size { + __u8 status; + __le16 le_mtu; + __u8 le_max_pkt; +} __packed; + +#define HCI_OP_LE_READ_LOCAL_FEATURES 0x2003 +struct hci_rp_le_read_local_features { + __u8 status; + __u8 features[8]; +} __packed; + +#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005 + +#define HCI_OP_LE_SET_ADV_PARAM 0x2006 +struct hci_cp_le_set_adv_param { + __le16 min_interval; + __le16 max_interval; + __u8 type; + __u8 own_address_type; + __u8 direct_addr_type; + bdaddr_t direct_addr; + __u8 channel_map; + __u8 filter_policy; +} __packed; + +#define HCI_OP_LE_READ_ADV_TX_POWER 0x2007 +struct 
hci_rp_le_read_adv_tx_power { + __u8 status; + __s8 tx_power; +} __packed; + +#define HCI_MAX_AD_LENGTH 31 + +#define HCI_OP_LE_SET_ADV_DATA 0x2008 +struct hci_cp_le_set_adv_data { + __u8 length; + __u8 data[HCI_MAX_AD_LENGTH]; +} __packed; + +#define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009 +struct hci_cp_le_set_scan_rsp_data { + __u8 length; + __u8 data[HCI_MAX_AD_LENGTH]; +} __packed; + +#define HCI_OP_LE_SET_ADV_ENABLE 0x200a + +#define LE_SCAN_PASSIVE 0x00 +#define LE_SCAN_ACTIVE 0x01 + +#define HCI_OP_LE_SET_SCAN_PARAM 0x200b +struct hci_cp_le_set_scan_param { + __u8 type; + __le16 interval; + __le16 window; + __u8 own_address_type; + __u8 filter_policy; +} __packed; + +#define LE_SCAN_DISABLE 0x00 +#define LE_SCAN_ENABLE 0x01 +#define LE_SCAN_FILTER_DUP_DISABLE 0x00 +#define LE_SCAN_FILTER_DUP_ENABLE 0x01 + +#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c +struct hci_cp_le_set_scan_enable { + __u8 enable; + __u8 filter_dup; +} __packed; + +#define HCI_LE_USE_PEER_ADDR 0x00 +#define HCI_LE_USE_WHITELIST 0x01 + +#define HCI_OP_LE_CREATE_CONN 0x200d +struct hci_cp_le_create_conn { + __le16 scan_interval; + __le16 scan_window; + __u8 filter_policy; + __u8 peer_addr_type; + bdaddr_t peer_addr; + __u8 own_address_type; + __le16 conn_interval_min; + __le16 conn_interval_max; + __le16 conn_latency; + __le16 supervision_timeout; + __le16 min_ce_len; + __le16 max_ce_len; +} __packed; + +#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e + +#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f +struct hci_rp_le_read_white_list_size { + __u8 status; + __u8 size; +} __packed; + +#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010 + +#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011 +struct hci_cp_le_add_to_white_list { + __u8 bdaddr_type; + bdaddr_t bdaddr; +} __packed; + +#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012 +struct hci_cp_le_del_from_white_list { + __u8 bdaddr_type; + bdaddr_t bdaddr; +} __packed; + +#define HCI_OP_LE_CONN_UPDATE 0x2013 +struct hci_cp_le_conn_update { + __le16 handle; + __le16 conn_interval_min; + __le16 conn_interval_max; + __le16 conn_latency; + __le16 supervision_timeout; + __le16 min_ce_len; + __le16 max_ce_len; +} __packed; + +#define HCI_OP_LE_START_ENC 0x2019 +struct hci_cp_le_start_enc { + __le16 handle; + __le64 rand; + __le16 ediv; + __u8 ltk[16]; +} __packed; + +#define HCI_OP_LE_LTK_REPLY 0x201a +struct hci_cp_le_ltk_reply { + __le16 handle; + __u8 ltk[16]; +} __packed; +struct hci_rp_le_ltk_reply { + __u8 status; + __le16 handle; +} __packed; + +#define HCI_OP_LE_LTK_NEG_REPLY 0x201b +struct hci_cp_le_ltk_neg_reply { + __le16 handle; +} __packed; +struct hci_rp_le_ltk_neg_reply { + __u8 status; + __le16 handle; +} __packed; + +#define HCI_OP_LE_READ_SUPPORTED_STATES 0x201c +struct hci_rp_le_read_supported_states { + __u8 status; + __u8 le_states[8]; +} __packed; /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 @@ -557,7 +1302,7 @@ struct inquiry_info { __u8 pscan_mode; __u8 dev_class[3]; __le16 clock_offset; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CONN_COMPLETE 0x03 struct hci_ev_conn_complete { @@ -566,54 +1311,54 @@ struct hci_ev_conn_complete { bdaddr_t bdaddr; __u8 link_type; __u8 encr_mode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CONN_REQUEST 0x04 struct hci_ev_conn_request { bdaddr_t bdaddr; __u8 dev_class[3]; __u8 link_type; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_DISCONN_COMPLETE 0x05 struct hci_ev_disconn_complete { __u8 status; __le16 handle; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_AUTH_COMPLETE 
0x06 struct hci_ev_auth_complete { __u8 status; __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_NAME 0x07 struct hci_ev_remote_name { __u8 status; bdaddr_t bdaddr; - __u8 name[248]; -} __attribute__ ((packed)); + __u8 name[HCI_MAX_NAME_LENGTH]; +} __packed; #define HCI_EV_ENCRYPT_CHANGE 0x08 struct hci_ev_encrypt_change { __u8 status; __le16 handle; __u8 encrypt; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09 struct hci_ev_change_link_key_complete { __u8 status; __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_FEATURES 0x0b struct hci_ev_remote_features { __u8 status; __le16 handle; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_VERSION 0x0c struct hci_ev_remote_version { @@ -622,7 +1367,7 @@ struct hci_ev_remote_version { __u8 lmp_ver; __le16 manufacturer; __le16 lmp_subver; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_QOS_SETUP_COMPLETE 0x0d struct hci_qos { @@ -631,38 +1376,43 @@ struct hci_qos { __u32 peak_bandwidth; __u32 latency; __u32 delay_variation; -} __attribute__ ((packed)); +} __packed; struct hci_ev_qos_setup_complete { __u8 status; __le16 handle; struct hci_qos qos; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CMD_COMPLETE 0x0e struct hci_ev_cmd_complete { __u8 ncmd; __le16 opcode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CMD_STATUS 0x0f struct hci_ev_cmd_status { __u8 status; __u8 ncmd; __le16 opcode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_ROLE_CHANGE 0x12 struct hci_ev_role_change { __u8 status; bdaddr_t bdaddr; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_NUM_COMP_PKTS 0x13 +struct hci_comp_pkts_info { + __le16 handle; + __le16 count; +} __packed; + struct hci_ev_num_comp_pkts { __u8 num_hndl; - /* variable length part */ -} __attribute__ ((packed)); + struct hci_comp_pkts_info handles[0]; +} __packed; #define HCI_EV_MODE_CHANGE 0x14 struct hci_ev_mode_change { @@ -670,37 +1420,44 @@ struct hci_ev_mode_change { __le16 handle; __u8 mode; __le16 interval; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_PIN_CODE_REQ 0x16 struct hci_ev_pin_code_req { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_LINK_KEY_REQ 0x17 struct hci_ev_link_key_req { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_LINK_KEY_NOTIFY 0x18 struct hci_ev_link_key_notify { bdaddr_t bdaddr; - __u8 link_key[16]; + __u8 link_key[HCI_LINK_KEY_SIZE]; __u8 key_type; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CLOCK_OFFSET 0x1c struct hci_ev_clock_offset { __u8 status; __le16 handle; __le16 clock_offset; -} __attribute__ ((packed)); +} __packed; + +#define HCI_EV_PKT_TYPE_CHANGE 0x1d +struct hci_ev_pkt_type_change { + __u8 status; + __le16 handle; + __le16 pkt_type; +} __packed; #define HCI_EV_PSCAN_REP_MODE 0x20 struct hci_ev_pscan_rep_mode { bdaddr_t bdaddr; __u8 pscan_rep_mode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 struct inquiry_info_with_rssi { @@ -710,7 +1467,7 @@ struct inquiry_info_with_rssi { __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; -} __attribute__ ((packed)); +} __packed; struct inquiry_info_with_rssi_and_pscan_mode { bdaddr_t bdaddr; __u8 pscan_rep_mode; @@ -719,7 +1476,7 @@ struct inquiry_info_with_rssi_and_pscan_mode { __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_EXT_FEATURES 0x23 struct 
hci_ev_remote_ext_features { @@ -728,7 +1485,7 @@ struct hci_ev_remote_ext_features { __u8 page; __u8 max_page; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SYNC_CONN_COMPLETE 0x2c struct hci_ev_sync_conn_complete { @@ -741,7 +1498,7 @@ struct hci_ev_sync_conn_complete { __le16 rx_pkt_len; __le16 tx_pkt_len; __u8 air_mode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SYNC_CONN_CHANGED 0x2d struct hci_ev_sync_conn_changed { @@ -751,7 +1508,7 @@ struct hci_ev_sync_conn_changed { __u8 retrans_window; __le16 rx_pkt_len; __le16 tx_pkt_len; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SNIFF_SUBRATE 0x2e struct hci_ev_sniff_subrate { @@ -761,7 +1518,7 @@ struct hci_ev_sniff_subrate { __le16 max_rx_latency; __le16 max_remote_timeout; __le16 max_local_timeout; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f struct extended_inquiry_info { @@ -772,20 +1529,185 @@ struct extended_inquiry_info { __le16 clock_offset; __s8 rssi; __u8 data[240]; -} __attribute__ ((packed)); +} __packed; + +#define HCI_EV_KEY_REFRESH_COMPLETE 0x30 +struct hci_ev_key_refresh_complete { + __u8 status; + __le16 handle; +} __packed; + +#define HCI_EV_IO_CAPA_REQUEST 0x31 +struct hci_ev_io_capa_request { + bdaddr_t bdaddr; +} __packed; + +#define HCI_EV_IO_CAPA_REPLY 0x32 +struct hci_ev_io_capa_reply { + bdaddr_t bdaddr; + __u8 capability; + __u8 oob_data; + __u8 authentication; +} __packed; + +#define HCI_EV_USER_CONFIRM_REQUEST 0x33 +struct hci_ev_user_confirm_req { + bdaddr_t bdaddr; + __le32 passkey; +} __packed; + +#define HCI_EV_USER_PASSKEY_REQUEST 0x34 +struct hci_ev_user_passkey_req { + bdaddr_t bdaddr; +} __packed; + +#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35 +struct hci_ev_remote_oob_data_request { + bdaddr_t bdaddr; +} __packed; + +#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36 +struct hci_ev_simple_pair_complete { + __u8 status; + bdaddr_t bdaddr; +} __packed; + +#define HCI_EV_USER_PASSKEY_NOTIFY 0x3b +struct hci_ev_user_passkey_notify { + bdaddr_t bdaddr; + __le32 passkey; +} __packed; + +#define HCI_KEYPRESS_STARTED 0 +#define HCI_KEYPRESS_ENTERED 1 +#define HCI_KEYPRESS_ERASED 2 +#define HCI_KEYPRESS_CLEARED 3 +#define HCI_KEYPRESS_COMPLETED 4 + +#define HCI_EV_KEYPRESS_NOTIFY 0x3c +struct hci_ev_keypress_notify { + bdaddr_t bdaddr; + __u8 type; +} __packed; + +#define HCI_EV_REMOTE_HOST_FEATURES 0x3d +struct hci_ev_remote_host_features { + bdaddr_t bdaddr; + __u8 features[8]; +} __packed; + +#define HCI_EV_LE_META 0x3e +struct hci_ev_le_meta { + __u8 subevent; +} __packed; + +#define HCI_EV_PHY_LINK_COMPLETE 0x40 +struct hci_ev_phy_link_complete { + __u8 status; + __u8 phy_handle; +} __packed; + +#define HCI_EV_CHANNEL_SELECTED 0x41 +struct hci_ev_channel_selected { + __u8 phy_handle; +} __packed; + +#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42 +struct hci_ev_disconn_phy_link_complete { + __u8 status; + __u8 phy_handle; + __u8 reason; +} __packed; + +#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45 +struct hci_ev_logical_link_complete { + __u8 status; + __le16 handle; + __u8 phy_handle; + __u8 flow_spec_id; +} __packed; + +#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46 +struct hci_ev_disconn_logical_link_complete { + __u8 status; + __le16 handle; + __u8 reason; +} __packed; + +#define HCI_EV_NUM_COMP_BLOCKS 0x48 +struct hci_comp_blocks_info { + __le16 handle; + __le16 pkts; + __le16 blocks; +} __packed; + +struct hci_ev_num_comp_blocks { + __le16 num_blocks; + __u8 num_hndl; + struct hci_comp_blocks_info handles[0]; +} 
__packed; + +#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F +struct hci_ev_sync_train_complete { + __u8 status; +} __packed; + +#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54 + +/* Low energy meta events */ +#define LE_CONN_ROLE_MASTER 0x00 + +#define HCI_EV_LE_CONN_COMPLETE 0x01 +struct hci_ev_le_conn_complete { + __u8 status; + __le16 handle; + __u8 role; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __le16 interval; + __le16 latency; + __le16 supervision_timeout; + __u8 clk_accurancy; +} __packed; + +#define HCI_EV_LE_LTK_REQ 0x05 +struct hci_ev_le_ltk_req { + __le16 handle; + __le64 rand; + __le16 ediv; +} __packed; + +/* Advertising report event types */ +#define LE_ADV_IND 0x00 +#define LE_ADV_DIRECT_IND 0x01 +#define LE_ADV_SCAN_IND 0x02 +#define LE_ADV_NONCONN_IND 0x03 +#define LE_ADV_SCAN_RSP 0x04 + +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 + +#define HCI_EV_LE_ADVERTISING_REPORT 0x02 +struct hci_ev_le_advertising_info { + __u8 evt_type; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 length; + __u8 data[0]; +} __packed; /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { __u16 type; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SI_DEVICE 0x01 struct hci_ev_si_device { __u16 event; __u16 dev_id; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SI_SECURITY 0x02 struct hci_ev_si_security { @@ -793,7 +1715,7 @@ struct hci_ev_si_security { __u16 proto; __u16 subproto; __u8 incoming; -} __attribute__ ((packed)); +} __packed; /* ---- HCI Packet structures ---- */ #define HCI_COMMAND_HDR_SIZE 3 @@ -803,26 +1725,24 @@ struct hci_ev_si_security { struct hci_command_hdr { __le16 opcode; /* OCF & OGF */ - __u8 plen; -} __attribute__ ((packed)); + __u8 plen; +} __packed; struct hci_event_hdr { __u8 evt; __u8 plen; -} __attribute__ ((packed)); +} __packed; struct hci_acl_hdr { __le16 handle; /* Handle & Flags(PB, BC) */ __le16 dlen; -} __attribute__ ((packed)); +} __packed; struct hci_sco_hdr { __le16 handle; __u8 dlen; -} __attribute__ ((packed)); +} __packed; -#ifdef __KERNEL__ -#include <linux/skbuff.h> static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) { return (struct hci_event_hdr *) skb->data; @@ -837,15 +1757,14 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) { return (struct hci_sco_hdr *) skb->data; } -#endif /* Command opcode pack/unpack */ -#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) +#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10))) #define hci_opcode_ogf(op) (op >> 10) #define hci_opcode_ocf(op) (op & 0x03ff) /* ACL handle and flags pack/unpack */ -#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) +#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12))) #define hci_handle(h) (h & 0x0fff) #define hci_flags(h) (h >> 12) @@ -863,9 +1782,15 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) struct sockaddr_hci { sa_family_t hci_family; unsigned short hci_dev; + unsigned short hci_channel; }; #define HCI_DEV_NONE 0xffff +#define HCI_CHANNEL_RAW 0 +#define HCI_CHANNEL_USER 1 +#define HCI_CHANNEL_MONITOR 2 +#define HCI_CHANNEL_CONTROL 3 + struct hci_filter { unsigned long type_mask; unsigned long event_mask[2]; @@ -951,6 +1876,11 @@ struct hci_conn_info_req { struct hci_conn_info conn_info[0]; }; +struct hci_auth_info_req { + bdaddr_t bdaddr; + __u8 type; +}; + struct hci_inquiry_req { __u16 dev_id; __u16 flags; diff --git 
a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ea13baa3851..b386bf17e6c 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1,6 +1,6 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux - Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -12,13 +12,13 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ @@ -27,9 +27,8 @@ #include <net/bluetooth/hci.h> -/* HCI upper protocols */ -#define HCI_PROTO_L2CAP 0 -#define HCI_PROTO_SCO 1 +/* HCI priority */ +#define HCI_PRIO_MAX 7 /* HCI Core structures */ struct inquiry_data { @@ -40,45 +39,179 @@ struct inquiry_data { __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; + __u8 ssp_mode; }; struct inquiry_entry { - struct inquiry_entry *next; + struct list_head all; /* inq_cache.all */ + struct list_head list; /* unknown or resolve */ + enum { + NAME_NOT_KNOWN, + NAME_NEEDED, + NAME_PENDING, + NAME_KNOWN, + } name_state; __u32 timestamp; struct inquiry_data data; }; -struct inquiry_cache { - spinlock_t lock; +struct discovery_state { + int type; + enum { + DISCOVERY_STOPPED, + DISCOVERY_STARTING, + DISCOVERY_FINDING, + DISCOVERY_RESOLVING, + DISCOVERY_STOPPING, + } state; + struct list_head all; /* All devices found during inquiry */ + struct list_head unknown; /* Name state not known */ + struct list_head resolve; /* Name needs to be resolved */ __u32 timestamp; - struct inquiry_entry *list; + bdaddr_t last_adv_addr; + u8 last_adv_addr_type; + s8 last_adv_rssi; + u8 last_adv_data[HCI_MAX_AD_LENGTH]; + u8 last_adv_data_len; }; struct hci_conn_hash { struct list_head list; - spinlock_t lock; unsigned int acl_num; + unsigned int amp_num; unsigned int sco_num; + unsigned int le_num; }; +struct bdaddr_list { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; +}; + +struct bt_uuid { + struct list_head list; + u8 uuid[16]; + u8 size; + u8 svc_hint; +}; + +struct smp_csrk { + bdaddr_t bdaddr; + u8 bdaddr_type; + u8 master; + u8 val[16]; +}; + +struct smp_ltk { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; + u8 authenticated; + u8 type; + u8 enc_size; + __le16 ediv; + __le64 rand; + u8 val[16]; +}; + +struct smp_irk { + struct list_head list; + bdaddr_t rpa; + bdaddr_t bdaddr; + u8 addr_type; + u8 val[16]; +}; + +struct link_key { + struct list_head list; + bdaddr_t bdaddr; + u8 type; + u8 val[HCI_LINK_KEY_SIZE]; + u8 pin_len; +}; + +struct oob_data { + struct list_head list; + 
bdaddr_t bdaddr; + u8 hash192[16]; + u8 randomizer192[16]; + u8 hash256[16]; + u8 randomizer256[16]; +}; + +#define HCI_MAX_SHORT_NAME_LENGTH 10 + +/* Default LE RPA expiry time, 15 minutes */ +#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) + +/* Default min/max age of connection information (1s/3s) */ +#define DEFAULT_CONN_INFO_MIN_AGE 1000 +#define DEFAULT_CONN_INFO_MAX_AGE 3000 + +struct amp_assoc { + __u16 len; + __u16 offset; + __u16 rem_len; + __u16 len_so_far; + __u8 data[HCI_MAX_AMP_ASSOC_SIZE]; +}; + +#define HCI_MAX_PAGES 3 + +#define NUM_REASSEMBLY 4 struct hci_dev { struct list_head list; - spinlock_t lock; - atomic_t refcnt; + struct mutex lock; char name[8]; unsigned long flags; __u16 id; - __u8 type; + __u8 bus; + __u8 dev_type; bdaddr_t bdaddr; - __u8 dev_name[248]; + bdaddr_t random_addr; + bdaddr_t static_addr; + __u8 adv_addr_type; + __u8 dev_name[HCI_MAX_NAME_LENGTH]; + __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; + __u8 eir[HCI_MAX_EIR_LENGTH]; __u8 dev_class[3]; - __u8 features[8]; + __u8 major_class; + __u8 minor_class; + __u8 max_page; + __u8 features[HCI_MAX_PAGES][8]; + __u8 le_features[8]; + __u8 le_white_list_size; + __u8 le_states[8]; __u8 commands[64]; __u8 hci_ver; __u16 hci_rev; + __u8 lmp_ver; __u16 manufacturer; + __u16 lmp_subver; __u16 voice_setting; + __u8 num_iac; + __u8 io_capability; + __s8 inq_tx_power; + __u16 page_scan_interval; + __u16 page_scan_window; + __u8 page_scan_type; + __u8 le_adv_channel_map; + __u8 le_scan_type; + __u16 le_scan_interval; + __u16 le_scan_window; + __u16 le_conn_min_interval; + __u16 le_conn_max_interval; + __u16 discov_interleaved_timeout; + __u16 conn_info_min_age; + __u16 conn_info_max_age; + __u8 ssp_debug_mode; + + __u16 devid_source; + __u16 devid_vendor; + __u16 devid_product; + __u16 devid_version; __u16 pkt_type; __u16 esco_type; @@ -89,134 +222,268 @@ struct hci_dev { __u16 sniff_min_interval; __u16 sniff_max_interval; + __u8 amp_status; + __u32 amp_total_bw; + __u32 amp_max_bw; + __u32 amp_min_latency; + __u32 amp_max_pdu; + __u8 amp_type; + __u16 amp_pal_cap; + __u16 amp_assoc_size; + __u32 amp_max_flush_to; + __u32 amp_be_flush_to; + + struct amp_assoc loc_assoc; + + __u8 flow_ctl_mode; + + unsigned int auto_accept_delay; + unsigned long quirks; atomic_t cmd_cnt; unsigned int acl_cnt; unsigned int sco_cnt; + unsigned int le_cnt; unsigned int acl_mtu; unsigned int sco_mtu; + unsigned int le_mtu; unsigned int acl_pkts; unsigned int sco_pkts; + unsigned int le_pkts; + + __u16 block_len; + __u16 block_mtu; + __u16 num_blocks; + __u16 block_cnt; - unsigned long cmd_last_tx; unsigned long acl_last_tx; unsigned long sco_last_tx; + unsigned long le_last_tx; + + struct workqueue_struct *workqueue; + struct workqueue_struct *req_workqueue; + + struct work_struct power_on; + struct delayed_work power_off; - struct tasklet_struct cmd_task; - struct tasklet_struct rx_task; - struct tasklet_struct tx_task; + __u16 discov_timeout; + struct delayed_work discov_off; + + struct delayed_work service_cache; + + struct timer_list cmd_timer; + + struct work_struct rx_work; + struct work_struct cmd_work; + struct work_struct tx_work; struct sk_buff_head rx_q; struct sk_buff_head raw_q; struct sk_buff_head cmd_q; + struct sk_buff *recv_evt; struct sk_buff *sent_cmd; - struct sk_buff *reassembly[3]; + struct sk_buff *reassembly[NUM_REASSEMBLY]; - struct semaphore req_lock; + struct mutex req_lock; wait_queue_head_t req_wait_q; __u32 req_status; __u32 req_result; - struct inquiry_cache inq_cache; + struct crypto_blkcipher *tfm_aes; + + 
struct discovery_state discovery; struct hci_conn_hash conn_hash; - struct hci_dev_stats stat; + struct list_head mgmt_pending; + struct list_head blacklist; + struct list_head uuids; + struct list_head link_keys; + struct list_head long_term_keys; + struct list_head identity_resolving_keys; + struct list_head remote_oob_data; + struct list_head le_white_list; + struct list_head le_conn_params; + struct list_head pend_le_conns; - struct sk_buff_head driver_init; + struct hci_dev_stats stat; - void *driver_data; - void *core_data; + atomic_t promisc; - atomic_t promisc; + struct dentry *debugfs; - struct device *parent; struct device dev; - struct module *owner; + struct rfkill *rfkill; + + unsigned long dev_flags; + + struct delayed_work le_scan_disable; + + __s8 adv_tx_power; + __u8 adv_data[HCI_MAX_AD_LENGTH]; + __u8 adv_data_len; + __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; + __u8 scan_rsp_data_len; + + __u8 irk[16]; + __u32 rpa_timeout; + struct delayed_work rpa_expired; + bdaddr_t rpa; int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); - int (*send)(struct sk_buff *skb); - void (*destruct)(struct hci_dev *hdev); + int (*setup)(struct hci_dev *hdev); + int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); - int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg); }; +#define HCI_PHY_HANDLE(handle) (handle & 0xff) + struct hci_conn { struct list_head list; - atomic_t refcnt; - spinlock_t lock; - - bdaddr_t dst; - __u16 handle; - __u16 state; - __u8 mode; - __u8 type; - __u8 out; - __u8 attempt; - __u8 dev_class[3]; - __u8 features[8]; - __u16 interval; - __u16 link_policy; - __u32 link_mode; - __u8 power_save; - unsigned long pend; - - unsigned int sent; + atomic_t refcnt; - struct sk_buff_head data_q; + bdaddr_t dst; + __u8 dst_type; + bdaddr_t src; + __u8 src_type; + bdaddr_t init_addr; + __u8 init_addr_type; + bdaddr_t resp_addr; + __u8 resp_addr_type; + __u16 handle; + __u16 state; + __u8 mode; + __u8 type; + bool out; + __u8 attempt; + __u8 dev_class[3]; + __u8 features[HCI_MAX_PAGES][8]; + __u16 pkt_type; + __u16 link_policy; + __u32 link_mode; + __u8 key_type; + __u8 auth_type; + __u8 sec_level; + __u8 pending_sec_level; + __u8 pin_length; + __u8 enc_key_size; + __u8 io_capability; + __u32 passkey_notify; + __u8 passkey_entered; + __u16 disc_timeout; + __u16 setting; + __u16 le_conn_min_interval; + __u16 le_conn_max_interval; + __s8 rssi; + __s8 tx_power; + __s8 max_tx_power; + unsigned long flags; + + unsigned long conn_info_timestamp; - struct timer_list disc_timer; - struct timer_list idle_timer; + __u8 remote_cap; + __u8 remote_auth; + __u8 remote_id; + bool flush_key; + + unsigned int sent; + + struct sk_buff_head data_q; + struct list_head chan_list; - struct work_struct work; + struct delayed_work disc_work; + struct delayed_work auto_accept_work; + struct delayed_work idle_work; + struct delayed_work le_conn_timeout; struct device dev; struct hci_dev *hdev; void *l2cap_data; void *sco_data; - void *priv; + void *smp_conn; + struct amp_mgr *amp_mgr; struct hci_conn *link; + + void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); + void (*security_cfm_cb) (struct hci_conn *conn, u8 status); + void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); +}; + +struct hci_chan { + struct list_head list; + __u16 handle; + struct hci_conn *conn; + struct sk_buff_head data_q; + unsigned int sent; + __u8 state; +}; + +struct hci_conn_params { + struct 
list_head list; + + bdaddr_t addr; + u8 addr_type; + + u16 conn_min_interval; + u16 conn_max_interval; + + enum { + HCI_AUTO_CONN_DISABLED, + HCI_AUTO_CONN_ALWAYS, + HCI_AUTO_CONN_LINK_LOSS, + } auto_connect; }; -extern struct hci_proto *hci_proto[]; extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; extern rwlock_t hci_cb_list_lock; -/* ----- Inquiry cache ----- */ -#define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds -#define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds +/* ----- HCI interface to upper protocols ----- */ +int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); +void l2cap_connect_cfm(struct hci_conn *hcon, u8 status); +int l2cap_disconn_ind(struct hci_conn *hcon); +void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); +int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); +int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); + +int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); +void sco_connect_cfm(struct hci_conn *hcon, __u8 status); +void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason); +int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); -#define inquiry_cache_lock(c) spin_lock(&c->lock) -#define inquiry_cache_unlock(c) spin_unlock(&c->lock) -#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock) -#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock) +/* ----- Inquiry cache ----- */ +#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ +#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ -static inline void inquiry_cache_init(struct hci_dev *hdev) +static inline void discovery_init(struct hci_dev *hdev) { - struct inquiry_cache *c = &hdev->inq_cache; - spin_lock_init(&c->lock); - c->list = NULL; + hdev->discovery.state = DISCOVERY_STOPPED; + INIT_LIST_HEAD(&hdev->discovery.all); + INIT_LIST_HEAD(&hdev->discovery.unknown); + INIT_LIST_HEAD(&hdev->discovery.resolve); } +bool hci_discovery_active(struct hci_dev *hdev); + +void hci_discovery_set_state(struct hci_dev *hdev, int state); + static inline int inquiry_cache_empty(struct hci_dev *hdev) { - struct inquiry_cache *c = &hdev->inq_cache; - return (c->list == NULL); + return list_empty(&hdev->discovery.all); } static inline long inquiry_cache_age(struct hci_dev *hdev) { - struct inquiry_cache *c = &hdev->inq_cache; + struct discovery_state *c = &hdev->discovery; return jiffies - c->timestamp; } @@ -225,188 +492,325 @@ static inline long inquiry_entry_age(struct inquiry_entry *e) return jiffies - e->timestamp; } -struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); -void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data); +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, + bdaddr_t *bdaddr); +struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, + bdaddr_t *bdaddr); +struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, + bdaddr_t *bdaddr, + int state); +void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, + struct inquiry_entry *ie); +bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, + bool name_known, bool *ssp); +void hci_inquiry_cache_flush(struct hci_dev *hdev); /* ----- HCI Connections ----- */ enum { HCI_CONN_AUTH_PEND, + HCI_CONN_REAUTH_PEND, HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, + HCI_CONN_SCO_SETUP_PEND, + HCI_CONN_LE_SMP_PEND, + HCI_CONN_MGMT_CONNECTED, + 
HCI_CONN_SSP_ENABLED, + HCI_CONN_SC_ENABLED, + HCI_CONN_AES_CCM, + HCI_CONN_POWER_SAVE, + HCI_CONN_REMOTE_OOB, + HCI_CONN_6LOWPAN, }; -static inline void hci_conn_hash_init(struct hci_dev *hdev) +static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { - struct hci_conn_hash *h = &hdev->conn_hash; - INIT_LIST_HEAD(&h->list); - spin_lock_init(&h->lock); - h->acl_num = 0; - h->sco_num = 0; + struct hci_dev *hdev = conn->hdev; + return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); +} + +static inline bool hci_conn_sc_enabled(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && + test_bit(HCI_CONN_SC_ENABLED, &conn->flags); } static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - list_add(&c->list, &h->list); - if (c->type == ACL_LINK) + list_add_rcu(&c->list, &h->list); + switch (c->type) { + case ACL_LINK: h->acl_num++; - else + break; + case AMP_LINK: + h->amp_num++; + break; + case LE_LINK: + h->le_num++; + break; + case SCO_LINK: + case ESCO_LINK: h->sco_num++; + break; + } } static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - list_del(&c->list); - if (c->type == ACL_LINK) + + list_del_rcu(&c->list); + synchronize_rcu(); + + switch (c->type) { + case ACL_LINK: h->acl_num--; - else + break; + case AMP_LINK: + h->amp_num--; + break; + case LE_LINK: + h->le_num--; + break; + case SCO_LINK: + case ESCO_LINK: h->sco_num--; + break; + } +} + +static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + switch (type) { + case ACL_LINK: + return h->acl_num; + case AMP_LINK: + return h->amp_num; + case LE_LINK: + return h->le_num; + case SCO_LINK: + case ESCO_LINK: + return h->sco_num; + default: + return 0; + } +} + +static inline unsigned int hci_conn_count(struct hci_dev *hdev) +{ + struct hci_conn_hash *c = &hdev->conn_hash; + + return c->acl_num + c->amp_num + c->sco_num + c->le_num; } static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, - __u16 handle) + __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; struct hci_conn *c; - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); - if (c->handle == handle) + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->handle == handle) { + rcu_read_unlock(); return c; + } } + rcu_read_unlock(); + return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, - __u8 type, bdaddr_t *ba) + __u8 type, bdaddr_t *ba) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; struct hci_conn *c; - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); - if (c->type == type && !bacmp(&c->dst, ba)) + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && !bacmp(&c->dst, ba)) { + rcu_read_unlock(); return c; + } } + + rcu_read_unlock(); + return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, - __u8 type, __u16 state) + __u8 type, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; struct hci_conn *c; - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); - if (c->type == type && c->state == state) + rcu_read_lock(); + + list_for_each_entry_rcu(c, 
&h->list, list) { + if (c->type == type && c->state == state) { + rcu_read_unlock(); return c; + } } + + rcu_read_unlock(); + return NULL; } -void hci_acl_connect(struct hci_conn *conn); -void hci_acl_disconn(struct hci_conn *conn, __u8 reason); -void hci_add_sco(struct hci_conn *conn, __u16 handle); -void hci_setup_sync(struct hci_conn *conn, __u16 handle); +void hci_disconnect(struct hci_conn *conn, __u8 reason); +bool hci_setup_sync(struct hci_conn *conn, __u16 handle); +void hci_sco_setup(struct hci_conn *conn, __u8 status); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); int hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); -int hci_conn_auth(struct hci_conn *conn); -int hci_conn_encrypt(struct hci_conn *conn); +struct hci_chan *hci_chan_create(struct hci_conn *conn); +void hci_chan_del(struct hci_chan *chan); +void hci_chan_list_flush(struct hci_conn *conn); +struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); + +struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, u8 sec_level, u8 auth_type); +struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + u8 sec_level, u8 auth_type); +struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u16 setting); +int hci_conn_check_link_mode(struct hci_conn *conn); +int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); +int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); int hci_conn_change_link_key(struct hci_conn *conn); -int hci_conn_switch_role(struct hci_conn *conn, uint8_t role); +int hci_conn_switch_role(struct hci_conn *conn, __u8 role); + +void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); + +void hci_le_conn_failed(struct hci_conn *conn, u8 status); + +/* + * hci_conn_get() and hci_conn_put() are used to control the life-time of an + * "hci_conn" object. They do not guarantee that the hci_conn object is running, + * working or anything else. They just guarantee that the object is available + * and can be dereferenced. So you can use its locks, local variables and any + * other constant data. + * Before accessing runtime data, you _must_ lock the object and then check that + * it is still running. As soon as you release the locks, the connection might + * get dropped, though. + * + * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control + * how long the underlying connection is held. So every channel that runs on the + * hci_conn object calls this to prevent the connection from disappearing. As + * long as you hold a device, you must also guarantee that you have a valid + * reference to the device via hci_conn_get() (or the initial reference from + * hci_conn_add()). + * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't + * break because nobody cares for that. But this means, we cannot use + * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 
+ */ + +static inline void hci_conn_get(struct hci_conn *conn) +{ + get_device(&conn->dev); +} -void hci_conn_enter_active_mode(struct hci_conn *conn); -void hci_conn_enter_sniff_mode(struct hci_conn *conn); +static inline void hci_conn_put(struct hci_conn *conn) +{ + put_device(&conn->dev); +} static inline void hci_conn_hold(struct hci_conn *conn) { + BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); + atomic_inc(&conn->refcnt); - del_timer(&conn->disc_timer); + cancel_delayed_work(&conn->disc_work); } -static inline void hci_conn_put(struct hci_conn *conn) +static inline void hci_conn_drop(struct hci_conn *conn) { + BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); + if (atomic_dec_and_test(&conn->refcnt)) { unsigned long timeo; - if (conn->type == ACL_LINK) { - del_timer(&conn->idle_timer); + + switch (conn->type) { + case ACL_LINK: + case LE_LINK: + cancel_delayed_work(&conn->idle_work); if (conn->state == BT_CONNECTED) { - timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT); + timeo = conn->disc_timeout; if (!conn->out) timeo *= 2; - } else + } else { timeo = msecs_to_jiffies(10); - } else + } + break; + + case AMP_LINK: + timeo = conn->disc_timeout; + break; + + default: timeo = msecs_to_jiffies(10); - mod_timer(&conn->disc_timer, jiffies + timeo); + break; + } + + cancel_delayed_work(&conn->disc_work); + queue_delayed_work(conn->hdev->workqueue, + &conn->disc_work, timeo); } } -/* ----- HCI tasks ----- */ -static inline void hci_sched_cmd(struct hci_dev *hdev) +/* ----- HCI Devices ----- */ +static inline void hci_dev_put(struct hci_dev *d) { - tasklet_schedule(&hdev->cmd_task); -} + BT_DBG("%s orig refcnt %d", d->name, + atomic_read(&d->dev.kobj.kref.refcount)); -static inline void hci_sched_rx(struct hci_dev *hdev) -{ - tasklet_schedule(&hdev->rx_task); + put_device(&d->dev); } -static inline void hci_sched_tx(struct hci_dev *hdev) +static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) { - tasklet_schedule(&hdev->tx_task); -} + BT_DBG("%s orig refcnt %d", d->name, + atomic_read(&d->dev.kobj.kref.refcount)); -/* ----- HCI Devices ----- */ -static inline void __hci_dev_put(struct hci_dev *d) -{ - if (atomic_dec_and_test(&d->refcnt)) - d->destruct(d); + get_device(&d->dev); + return d; } -static inline void hci_dev_put(struct hci_dev *d) -{ - __hci_dev_put(d); - module_put(d->owner); -} +#define hci_dev_lock(d) mutex_lock(&d->lock) +#define hci_dev_unlock(d) mutex_unlock(&d->lock) + +#define to_hci_dev(d) container_of(d, struct hci_dev, dev) +#define to_hci_conn(c) container_of(c, struct hci_conn, dev) -static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d) +static inline void *hci_get_drvdata(struct hci_dev *hdev) { - atomic_inc(&d->refcnt); - return d; + return dev_get_drvdata(&hdev->dev); } -static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) +static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) { - if (try_module_get(d->owner)) - return __hci_dev_hold(d); - return NULL; + dev_set_drvdata(&hdev->dev, data); } -#define hci_dev_lock(d) spin_lock(&d->lock) -#define hci_dev_unlock(d) spin_unlock(&d->lock) -#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock) -#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock) - struct hci_dev *hci_dev_get(int index); -struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); +struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src); struct hci_dev *hci_alloc_dev(void); void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); -int 
hci_unregister_dev(struct hci_dev *hdev); +void hci_unregister_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); @@ -418,220 +822,479 @@ int hci_get_dev_list(void __user *arg); int hci_get_dev_info(void __user *arg); int hci_get_conn_list(void __user *arg); int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); +int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); -void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); - -/* Receive frame from HCI drivers */ -static inline int hci_recv_frame(struct sk_buff *skb) -{ - struct hci_dev *hdev = (struct hci_dev *) skb->dev; - if (!hdev || (!test_bit(HCI_UP, &hdev->flags) - && !test_bit(HCI_INIT, &hdev->flags))) { - kfree_skb(skb); - return -ENXIO; - } - - /* Incomming skb */ - bt_cb(skb)->incoming = 1; - - /* Time stamp */ - __net_timestamp(skb); +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type); +int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); +int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); + +struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type); +void hci_white_list_clear(struct hci_dev *hdev); +int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); +int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); + +struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type); +int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, + u8 auto_connect, u16 conn_min_interval, + u16 conn_max_interval); +void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); +void hci_conn_params_clear(struct hci_dev *hdev); + +struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type); +void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); +void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); +void hci_pend_le_conns_clear(struct hci_dev *hdev); + +void hci_update_background_scan(struct hci_dev *hdev); + +void hci_uuids_clear(struct hci_dev *hdev); + +void hci_link_keys_clear(struct hci_dev *hdev); +struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); +int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, + bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len); +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, + bool master); +struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 type, u8 authenticated, + u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); +struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, bool master); +int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); +void hci_smp_ltks_clear(struct hci_dev *hdev); +int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); + +struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa); +struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type); +struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 val[16], bdaddr_t *rpa); +void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); +void hci_smp_irks_clear(struct hci_dev *hdev); + +void 
hci_remote_oob_data_clear(struct hci_dev *hdev); +struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, + bdaddr_t *bdaddr); +int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 *hash, u8 *randomizer); +int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 *hash192, u8 *randomizer192, + u8 *hash256, u8 *randomizer256); +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); - /* Queue frame for rx task */ - skb_queue_tail(&hdev->rx_q, skb); - hci_sched_rx(hdev); - return 0; -} +void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); +int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); +int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); -int hci_register_sysfs(struct hci_dev *hdev); -void hci_unregister_sysfs(struct hci_dev *hdev); +void hci_init_sysfs(struct hci_dev *hdev); +void hci_conn_init_sysfs(struct hci_conn *conn); void hci_conn_add_sysfs(struct hci_conn *conn); void hci_conn_del_sysfs(struct hci_conn *conn); -#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev)) +#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev)) /* ----- LMP capabilities ----- */ -#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH) -#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT) -#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF) -#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR) -#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO) +#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT) +#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH) +#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) +#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) +#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) +#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) +#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) +#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) +#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) +#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) +#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) +#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) +#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) +#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) +#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH) +#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO) +#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) +#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) +#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) + +/* ----- Extended LMP capabilities ----- */ +#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) +#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE) +#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) +#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) +#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) +#define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING) + +/* ----- Host capabilities ----- */ +#define lmp_host_ssp_capable(dev) 
((dev)->features[1][0] & LMP_HOST_SSP) +#define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC) +#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) +#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) /* ----- HCI protocols ----- */ -struct hci_proto { - char *name; - unsigned int id; - unsigned long flags; +#define HCI_PROTO_DEFER 0x01 - void *priv; - - int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type); - int (*connect_cfm) (struct hci_conn *conn, __u8 status); - int (*disconn_ind) (struct hci_conn *conn, __u8 reason); - int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags); - int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); - int (*auth_cfm) (struct hci_conn *conn, __u8 status); - int (*encrypt_cfm) (struct hci_conn *conn, __u8 status); -}; - -static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) +static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 type, __u8 *flags) { - register struct hci_proto *hp; - int mask = 0; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->connect_ind) - mask |= hp->connect_ind(hdev, bdaddr, type); + switch (type) { + case ACL_LINK: + return l2cap_connect_ind(hdev, bdaddr); - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->connect_ind) - mask |= hp->connect_ind(hdev, bdaddr, type); + case SCO_LINK: + case ESCO_LINK: + return sco_connect_ind(hdev, bdaddr, flags); - return mask; + default: + BT_ERR("unknown link type %d", type); + return -EINVAL; + } } static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) { - register struct hci_proto *hp; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->connect_cfm) - hp->connect_cfm(conn, status); + switch (conn->type) { + case ACL_LINK: + case LE_LINK: + l2cap_connect_cfm(conn, status); + break; + + case SCO_LINK: + case ESCO_LINK: + sco_connect_cfm(conn, status); + break; + + default: + BT_ERR("unknown link type %d", conn->type); + break; + } - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->connect_cfm) - hp->connect_cfm(conn, status); + if (conn->connect_cfm_cb) + conn->connect_cfm_cb(conn, status); } -static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason) +static inline int hci_proto_disconn_ind(struct hci_conn *conn) { - register struct hci_proto *hp; + if (conn->type != ACL_LINK && conn->type != LE_LINK) + return HCI_ERROR_REMOTE_USER_TERM; + + return l2cap_disconn_ind(conn); +} - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->disconn_ind) - hp->disconn_ind(conn, reason); +static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) +{ + switch (conn->type) { + case ACL_LINK: + case LE_LINK: + l2cap_disconn_cfm(conn, reason); + break; + + case SCO_LINK: + case ESCO_LINK: + sco_disconn_cfm(conn, reason); + break; + + /* L2CAP would be handled for BREDR chan */ + case AMP_LINK: + break; + + default: + BT_ERR("unknown link type %d", conn->type); + break; + } - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->disconn_ind) - hp->disconn_ind(conn, reason); + if (conn->disconn_cfm_cb) + conn->disconn_cfm_cb(conn, reason); } static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) { - register struct hci_proto *hp; + __u8 encrypt; + + if (conn->type != ACL_LINK && conn->type != LE_LINK) + return; - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->auth_cfm) - hp->auth_cfm(conn, status); + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) + 
return; - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->auth_cfm) - hp->auth_cfm(conn, status); + encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; + l2cap_security_cfm(conn, status, encrypt); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } -static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status) +static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, + __u8 encrypt) { - register struct hci_proto *hp; + if (conn->type != ACL_LINK && conn->type != LE_LINK) + return; - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->encrypt_cfm) - hp->encrypt_cfm(conn, status); + l2cap_security_cfm(conn, status, encrypt); - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->encrypt_cfm) - hp->encrypt_cfm(conn, status); + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } -int hci_register_proto(struct hci_proto *hproto); -int hci_unregister_proto(struct hci_proto *hproto); - /* ----- HCI callbacks ----- */ struct hci_cb { struct list_head list; char *name; - void (*auth_cfm) (struct hci_conn *conn, __u8 status); - void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); + void (*security_cfm) (struct hci_conn *conn, __u8 status, + __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { - struct list_head *p; + struct hci_cb *cb; + __u8 encrypt; hci_proto_auth_cfm(conn, status); - read_lock_bh(&hci_cb_list_lock); - list_for_each(p, &hci_cb_list) { - struct hci_cb *cb = list_entry(p, struct hci_cb, list); - if (cb->auth_cfm) - cb->auth_cfm(conn, status); + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) + return; + + encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 
0x01 : 0x00; + + read_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } -static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, + __u8 encrypt) { - struct list_head *p; + struct hci_cb *cb; + + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; + + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; - hci_proto_encrypt_cfm(conn, status); + hci_proto_encrypt_cfm(conn, status, encrypt); - read_lock_bh(&hci_cb_list_lock); - list_for_each(p, &hci_cb_list) { - struct hci_cb *cb = list_entry(p, struct hci_cb, list); - if (cb->encrypt_cfm) - cb->encrypt_cfm(conn, status, encrypt); + read_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { - struct list_head *p; + struct hci_cb *cb; - read_lock_bh(&hci_cb_list_lock); - list_for_each(p, &hci_cb_list) { - struct hci_cb *cb = list_entry(p, struct hci_cb, list); + read_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } -static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) +static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, + __u8 role) { - struct list_head *p; + struct hci_cb *cb; - read_lock_bh(&hci_cb_list_lock); - list_for_each(p, &hci_cb_list) { - struct hci_cb *cb = list_entry(p, struct hci_cb, list); + read_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); +} + +static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type) +{ + size_t parsed = 0; + + if (data_len < 2) + return false; + + while (parsed < data_len - 1) { + u8 field_len = data[0]; + + if (field_len == 0) + break; + + parsed += field_len + 1; + + if (parsed > data_len) + break; + + if (data[1] == type) + return true; + + data += field_len + 1; + } + + return false; +} + +static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) +{ + if (addr_type != 0x01) + return false; + + if ((bdaddr->b[5] & 0xc0) == 0x40) + return true; + + return false; +} + +static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 addr_type) +{ + if (!hci_bdaddr_is_rpa(bdaddr, addr_type)) + return NULL; + + return hci_find_irk_by_rpa(hdev, bdaddr); } int hci_register_cb(struct hci_cb *hcb); int hci_unregister_cb(struct hci_cb *hcb); -int hci_register_notifier(struct notifier_block *nb); -int hci_unregister_notifier(struct notifier_block *nb); +struct hci_request { + struct hci_dev *hdev; + struct sk_buff_head cmd_q; + + /* If something goes wrong when building the HCI request, the error + * value is stored in this field. 
+ */ + int err; +}; -int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param); -int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); -int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); +void hci_req_init(struct hci_request *req, struct hci_dev *hdev); +int hci_req_run(struct hci_request *req, hci_req_complete_t complete); +void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, + const void *param); +void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, + const void *param, u8 event); +void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status); -void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); +void hci_req_add_le_scan_disable(struct hci_request *req); +void hci_req_add_le_passive_scan(struct hci_request *req); -void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data); +struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout); +struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout); + +int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, + const void *param); +void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); +void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); + +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); +void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk); +void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); + +void hci_sock_dev_event(struct hci_dev *hdev, int event); + +/* Management interface */ +#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) +#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ + BIT(BDADDR_LE_RANDOM)) +#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \ + BIT(BDADDR_LE_PUBLIC) | \ + BIT(BDADDR_LE_RANDOM)) + +/* These LE scan and inquiry parameters were chosen according to LE General + * Discovery Procedure specification. 
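+ * The scan window and interval below are expressed in the controller's
+ * 0.625 ms units (0x12 is roughly 11.25 ms), while the timeouts are
+ * plain milliseconds.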
+ */ +#define DISCOV_LE_SCAN_WIN 0x12 +#define DISCOV_LE_SCAN_INT 0x12 +#define DISCOV_LE_TIMEOUT 10240 /* msec */ +#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */ +#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 +#define DISCOV_BREDR_INQUIRY_LEN 0x08 + +int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); +void mgmt_index_added(struct hci_dev *hdev); +void mgmt_index_removed(struct hci_dev *hdev); +void mgmt_set_powered_failed(struct hci_dev *hdev, int err); +int mgmt_powered(struct hci_dev *hdev, u8 powered); +void mgmt_discoverable_timeout(struct hci_dev *hdev); +void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); +void mgmt_connectable(struct hci_dev *hdev, u8 connectable); +void mgmt_advertising(struct hci_dev *hdev, u8 advertising); +void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); +void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, + bool persistent); +void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u32 flags, u8 *name, u8 name_len, + u8 *dev_class); +void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 reason, + bool mgmt_connected); +void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status); +void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u8 status); +void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); +void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status); +void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status); +int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u32 value, + u8 confirm_hint); +int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status); +int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status); +int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type); +int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status); +int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status); +int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u32 passkey, + u8 entered); +void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u8 status); +void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); +void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); +void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); +void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, + u8 status); +void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); +void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, + u8 *randomizer192, u8 *hash256, + u8 *randomizer256, u8 status); +void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, + u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp, + u8 scan_rsp_len); +void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, s8 rssi, u8 *name, u8 name_len); +void mgmt_discovering(struct hci_dev *hdev, u8 
discovering); +int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); +int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); +void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); +void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); +void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, + bool persistent); +void mgmt_reenable_advertising(struct hci_dev *hdev); +void mgmt_smp_complete(struct hci_conn *conn, bool complete); /* HCI info for socket */ #define hci_pi(sk) ((struct hci_pinfo *) sk) @@ -641,6 +1304,7 @@ struct hci_pinfo { struct hci_dev *hdev; struct hci_filter filter; __u32 cmsg_mask; + unsigned short channel; }; /* HCI security filter */ @@ -657,9 +1321,21 @@ struct hci_sec_filter { #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 -#define hci_req_lock(d) down(&d->req_lock) -#define hci_req_unlock(d) up(&d->req_lock) +#define hci_req_lock(d) mutex_lock(&d->req_lock) +#define hci_req_unlock(d) mutex_unlock(&d->req_lock) + +void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, + u16 latency, u16 to_multiplier); +void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, + __u8 ltk[16]); + +int hci_update_random_address(struct hci_request *req, bool require_privacy, + u8 *own_addr_type); +void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 *bdaddr_type); -void hci_req_complete(struct hci_dev *hdev, int result); +#define SCO_AIRMODE_MASK 0x0003 +#define SCO_AIRMODE_CVSD 0x0000 +#define SCO_AIRMODE_TRANSP 0x0003 #endif /* __HCI_CORE_H */ diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h new file mode 100644 index 00000000000..77d1e576418 --- /dev/null +++ b/include/net/bluetooth/hci_mon.h @@ -0,0 +1,51 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + + Copyright (C) 2011-2012 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __HCI_MON_H +#define __HCI_MON_H + +struct hci_mon_hdr { + __le16 opcode; + __le16 index; + __le16 len; +} __packed; +#define HCI_MON_HDR_SIZE 6 + +#define HCI_MON_NEW_INDEX 0 +#define HCI_MON_DEL_INDEX 1 +#define HCI_MON_COMMAND_PKT 2 +#define HCI_MON_EVENT_PKT 3 +#define HCI_MON_ACL_TX_PKT 4 +#define HCI_MON_ACL_RX_PKT 5 +#define HCI_MON_SCO_TX_PKT 6 +#define HCI_MON_SCO_RX_PKT 7 + +struct hci_mon_new_index { + __u8 type; + __u8 bus; + bdaddr_t bdaddr; + char name[8]; +} __packed; +#define HCI_MON_NEW_INDEX_SIZE 16 + +#endif /* __HCI_MON_H */ diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 73e115bc12d..4abdcb220e3 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -1,6 +1,8 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> + Copyright (C) 2010 Google Inc. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -12,31 +14,56 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #ifndef __L2CAP_H #define __L2CAP_H -/* L2CAP defaults */ -#define L2CAP_DEFAULT_MTU 672 -#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF +#include <asm/unaligned.h> -#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ -#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ +/* L2CAP defaults */ +#define L2CAP_DEFAULT_MTU 672 +#define L2CAP_DEFAULT_MIN_MTU 48 +#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF +#define L2CAP_EFS_DEFAULT_FLUSH_TO 0xFFFFFFFF +#define L2CAP_DEFAULT_TX_WINDOW 63 +#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF +#define L2CAP_DEFAULT_MAX_TX 3 +#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ +#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ +#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */ +#define L2CAP_DEFAULT_ACK_TO 200 +#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF +#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF +#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF +#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */ +#define L2CAP_LE_MIN_MTU 23 + +#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100) +#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000) +#define L2CAP_ENC_TIMEOUT msecs_to_jiffies(5000) +#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000) +#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000) +#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000) +#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000) + +#define L2CAP_A2MP_DEFAULT_MTU 670 /* L2CAP socket address */ struct sockaddr_l2 { sa_family_t l2_family; __le16 l2_psm; bdaddr_t l2_bdaddr; + __le16 l2_cid; + __u8 l2_bdaddr_type; }; /* L2CAP socket options */ @@ -46,6 +73,9 @@ struct l2cap_options { __u16 imtu; __u16 flush_to; __u8 mode; + __u8 fcs; + __u8 max_tx; + __u16 txwin_size; }; #define L2CAP_CONNINFO 0x02 @@ -61,91 +91,228 @@ struct l2cap_conninfo { #define L2CAP_LM_TRUSTED 0x0008 #define L2CAP_LM_RELIABLE 0x0010 #define L2CAP_LM_SECURE 0x0020 +#define L2CAP_LM_FIPS 0x0040 /* L2CAP command codes */ -#define L2CAP_COMMAND_REJ 0x01 -#define L2CAP_CONN_REQ 0x02 -#define L2CAP_CONN_RSP 0x03 -#define L2CAP_CONF_REQ 0x04 -#define L2CAP_CONF_RSP 0x05 -#define L2CAP_DISCONN_REQ 0x06 -#define L2CAP_DISCONN_RSP 0x07 -#define L2CAP_ECHO_REQ 0x08 -#define L2CAP_ECHO_RSP 0x09 -#define L2CAP_INFO_REQ 0x0a -#define L2CAP_INFO_RSP 0x0b +#define L2CAP_COMMAND_REJ 0x01 +#define L2CAP_CONN_REQ 0x02 +#define L2CAP_CONN_RSP 0x03 +#define L2CAP_CONF_REQ 0x04 +#define L2CAP_CONF_RSP 0x05 +#define L2CAP_DISCONN_REQ 0x06 +#define L2CAP_DISCONN_RSP 0x07 +#define L2CAP_ECHO_REQ 0x08 +#define L2CAP_ECHO_RSP 0x09 +#define L2CAP_INFO_REQ 0x0a +#define L2CAP_INFO_RSP 0x0b +#define L2CAP_CREATE_CHAN_REQ 0x0c +#define L2CAP_CREATE_CHAN_RSP 0x0d +#define L2CAP_MOVE_CHAN_REQ 0x0e +#define L2CAP_MOVE_CHAN_RSP 0x0f +#define L2CAP_MOVE_CHAN_CFM 0x10 +#define L2CAP_MOVE_CHAN_CFM_RSP 0x11 +#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12 +#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13 +#define L2CAP_LE_CONN_REQ 0x14 +#define L2CAP_LE_CONN_RSP 0x15 +#define L2CAP_LE_CREDITS 0x16 + +/* L2CAP extended feature mask */ +#define L2CAP_FEAT_FLOWCTL 0x00000001 +#define L2CAP_FEAT_RETRANS 0x00000002 +#define L2CAP_FEAT_BIDIR_QOS 0x00000004 +#define L2CAP_FEAT_ERTM 0x00000008 +#define L2CAP_FEAT_STREAMING 0x00000010 +#define L2CAP_FEAT_FCS 0x00000020 +#define L2CAP_FEAT_EXT_FLOW 0x00000040 +#define L2CAP_FEAT_FIXED_CHAN 0x00000080 +#define L2CAP_FEAT_EXT_WINDOW 0x00000100 +#define L2CAP_FEAT_UCD 0x00000200 + +/* L2CAP checksum option */ +#define L2CAP_FCS_NONE 0x00 +#define L2CAP_FCS_CRC16 0x01 + +/* L2CAP fixed channels */ +#define L2CAP_FC_L2CAP 0x02 +#define 
L2CAP_FC_CONNLESS 0x04 +#define L2CAP_FC_A2MP 0x08 +#define L2CAP_FC_6LOWPAN 0x3e /* reserved and temporary value */ + +/* L2CAP Control Field bit masks */ +#define L2CAP_CTRL_SAR 0xC000 +#define L2CAP_CTRL_REQSEQ 0x3F00 +#define L2CAP_CTRL_TXSEQ 0x007E +#define L2CAP_CTRL_SUPERVISE 0x000C + +#define L2CAP_CTRL_RETRANS 0x0080 +#define L2CAP_CTRL_FINAL 0x0080 +#define L2CAP_CTRL_POLL 0x0010 +#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ + +#define L2CAP_CTRL_TXSEQ_SHIFT 1 +#define L2CAP_CTRL_SUPER_SHIFT 2 +#define L2CAP_CTRL_POLL_SHIFT 4 +#define L2CAP_CTRL_FINAL_SHIFT 7 +#define L2CAP_CTRL_REQSEQ_SHIFT 8 +#define L2CAP_CTRL_SAR_SHIFT 14 + +/* L2CAP Extended Control Field bit mask */ +#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000 +#define L2CAP_EXT_CTRL_SAR 0x00030000 +#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000 +#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC + +#define L2CAP_EXT_CTRL_POLL 0x00040000 +#define L2CAP_EXT_CTRL_FINAL 0x00000002 +#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */ + +#define L2CAP_EXT_CTRL_FINAL_SHIFT 1 +#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2 +#define L2CAP_EXT_CTRL_SAR_SHIFT 16 +#define L2CAP_EXT_CTRL_SUPER_SHIFT 16 +#define L2CAP_EXT_CTRL_POLL_SHIFT 18 +#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18 + +/* L2CAP Supervisory Function */ +#define L2CAP_SUPER_RR 0x00 +#define L2CAP_SUPER_REJ 0x01 +#define L2CAP_SUPER_RNR 0x02 +#define L2CAP_SUPER_SREJ 0x03 + +/* L2CAP Segmentation and Reassembly */ +#define L2CAP_SAR_UNSEGMENTED 0x00 +#define L2CAP_SAR_START 0x01 +#define L2CAP_SAR_END 0x02 +#define L2CAP_SAR_CONTINUE 0x03 + +/* L2CAP Command rej. reasons */ +#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 +#define L2CAP_REJ_MTU_EXCEEDED 0x0001 +#define L2CAP_REJ_INVALID_CID 0x0002 /* L2CAP structures */ struct l2cap_hdr { __le16 len; __le16 cid; -} __attribute__ ((packed)); +} __packed; #define L2CAP_HDR_SIZE 4 +#define L2CAP_ENH_HDR_SIZE 6 +#define L2CAP_EXT_HDR_SIZE 8 + +#define L2CAP_FCS_SIZE 2 +#define L2CAP_SDULEN_SIZE 2 +#define L2CAP_PSMLEN_SIZE 2 +#define L2CAP_ENH_CTRL_SIZE 2 +#define L2CAP_EXT_CTRL_SIZE 4 struct l2cap_cmd_hdr { __u8 code; __u8 ident; __le16 len; -} __attribute__ ((packed)); +} __packed; #define L2CAP_CMD_HDR_SIZE 4 -struct l2cap_cmd_rej { +struct l2cap_cmd_rej_unk { __le16 reason; -} __attribute__ ((packed)); +} __packed; + +struct l2cap_cmd_rej_mtu { + __le16 reason; + __le16 max_mtu; +} __packed; + +struct l2cap_cmd_rej_cid { + __le16 reason; + __le16 scid; + __le16 dcid; +} __packed; struct l2cap_conn_req { __le16 psm; __le16 scid; -} __attribute__ ((packed)); +} __packed; struct l2cap_conn_rsp { __le16 dcid; __le16 scid; __le16 result; __le16 status; -} __attribute__ ((packed)); - -/* connect result */ -#define L2CAP_CR_SUCCESS 0x0000 -#define L2CAP_CR_PEND 0x0001 -#define L2CAP_CR_BAD_PSM 0x0002 -#define L2CAP_CR_SEC_BLOCK 0x0003 -#define L2CAP_CR_NO_MEM 0x0004 - -/* connect status */ -#define L2CAP_CS_NO_INFO 0x0000 -#define L2CAP_CS_AUTHEN_PEND 0x0001 -#define L2CAP_CS_AUTHOR_PEND 0x0002 +} __packed; + +/* protocol/service multiplexer (PSM) */ +#define L2CAP_PSM_SDP 0x0001 +#define L2CAP_PSM_RFCOMM 0x0003 +#define L2CAP_PSM_3DSP 0x0021 + +/* channel identifier */ +#define L2CAP_CID_SIGNALING 0x0001 +#define L2CAP_CID_CONN_LESS 0x0002 +#define L2CAP_CID_A2MP 0x0003 +#define L2CAP_CID_ATT 0x0004 +#define L2CAP_CID_LE_SIGNALING 0x0005 +#define L2CAP_CID_SMP 0x0006 +#define L2CAP_CID_DYN_START 0x0040 +#define L2CAP_CID_DYN_END 0xffff +#define L2CAP_CID_LE_DYN_END 0x007f + +/* connect/create channel results */ +#define L2CAP_CR_SUCCESS 
0x0000 +#define L2CAP_CR_PEND 0x0001 +#define L2CAP_CR_BAD_PSM 0x0002 +#define L2CAP_CR_SEC_BLOCK 0x0003 +#define L2CAP_CR_NO_MEM 0x0004 +#define L2CAP_CR_BAD_AMP 0x0005 +#define L2CAP_CR_AUTHENTICATION 0x0005 +#define L2CAP_CR_AUTHORIZATION 0x0006 +#define L2CAP_CR_BAD_KEY_SIZE 0x0007 +#define L2CAP_CR_ENCRYPTION 0x0008 + +/* connect/create channel status */ +#define L2CAP_CS_NO_INFO 0x0000 +#define L2CAP_CS_AUTHEN_PEND 0x0001 +#define L2CAP_CS_AUTHOR_PEND 0x0002 struct l2cap_conf_req { __le16 dcid; __le16 flags; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct l2cap_conf_rsp { __le16 scid; __le16 flags; __le16 result; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define L2CAP_CONF_SUCCESS 0x0000 #define L2CAP_CONF_UNACCEPT 0x0001 #define L2CAP_CONF_REJECT 0x0002 #define L2CAP_CONF_UNKNOWN 0x0003 +#define L2CAP_CONF_PENDING 0x0004 +#define L2CAP_CONF_EFS_REJECT 0x0005 + +/* configuration req/rsp continuation flag */ +#define L2CAP_CONF_FLAG_CONTINUATION 0x0001 struct l2cap_conf_opt { __u8 type; __u8 len; __u8 val[0]; -} __attribute__ ((packed)); +} __packed; #define L2CAP_CONF_OPT_SIZE 2 +#define L2CAP_CONF_HINT 0x80 +#define L2CAP_CONF_MASK 0x7f + #define L2CAP_CONF_MTU 0x01 #define L2CAP_CONF_FLUSH_TO 0x02 #define L2CAP_CONF_QOS 0x03 #define L2CAP_CONF_RFC 0x04 +#define L2CAP_CONF_FCS 0x05 +#define L2CAP_CONF_EFS 0x06 +#define L2CAP_CONF_EWS 0x07 #define L2CAP_CONF_MAX_SIZE 22 @@ -156,110 +323,575 @@ struct l2cap_conf_rfc { __le16 retrans_timeout; __le16 monitor_timeout; __le16 max_pdu_size; -} __attribute__ ((packed)); +} __packed; #define L2CAP_MODE_BASIC 0x00 #define L2CAP_MODE_RETRANS 0x01 #define L2CAP_MODE_FLOWCTL 0x02 +#define L2CAP_MODE_ERTM 0x03 +#define L2CAP_MODE_STREAMING 0x04 + +/* Unlike the above this one doesn't actually map to anything that would + * ever be sent over the air. Therefore, use a value that's unlikely to + * ever be used in the BR/EDR configuration phase. 
+ */ +#define L2CAP_MODE_LE_FLOWCTL 0x80 + +struct l2cap_conf_efs { + __u8 id; + __u8 stype; + __le16 msdu; + __le32 sdu_itime; + __le32 acc_lat; + __le32 flush_to; +} __packed; + +#define L2CAP_SERV_NOTRAFIC 0x00 +#define L2CAP_SERV_BESTEFFORT 0x01 +#define L2CAP_SERV_GUARANTEED 0x02 + +#define L2CAP_BESTEFFORT_ID 0x01 struct l2cap_disconn_req { __le16 dcid; __le16 scid; -} __attribute__ ((packed)); +} __packed; struct l2cap_disconn_rsp { __le16 dcid; __le16 scid; -} __attribute__ ((packed)); +} __packed; struct l2cap_info_req { __le16 type; -} __attribute__ ((packed)); +} __packed; struct l2cap_info_rsp { __le16 type; __le16 result; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; -/* info type */ -#define L2CAP_IT_CL_MTU 0x0001 -#define L2CAP_IT_FEAT_MASK 0x0002 +struct l2cap_create_chan_req { + __le16 psm; + __le16 scid; + __u8 amp_id; +} __packed; -/* info result */ -#define L2CAP_IR_SUCCESS 0x0000 -#define L2CAP_IR_NOTSUPP 0x0001 - -/* ----- L2CAP connections ----- */ -struct l2cap_chan_list { - struct sock *head; - rwlock_t lock; - long num; -}; +struct l2cap_create_chan_rsp { + __le16 dcid; + __le16 scid; + __le16 result; + __le16 status; +} __packed; -struct l2cap_conn { - struct hci_conn *hcon; +struct l2cap_move_chan_req { + __le16 icid; + __u8 dest_amp_id; +} __packed; - bdaddr_t *dst; - bdaddr_t *src; +struct l2cap_move_chan_rsp { + __le16 icid; + __le16 result; +} __packed; + +#define L2CAP_MR_SUCCESS 0x0000 +#define L2CAP_MR_PEND 0x0001 +#define L2CAP_MR_BAD_ID 0x0002 +#define L2CAP_MR_SAME_ID 0x0003 +#define L2CAP_MR_NOT_SUPP 0x0004 +#define L2CAP_MR_COLLISION 0x0005 +#define L2CAP_MR_NOT_ALLOWED 0x0006 + +struct l2cap_move_chan_cfm { + __le16 icid; + __le16 result; +} __packed; - unsigned int mtu; +#define L2CAP_MC_CONFIRMED 0x0000 +#define L2CAP_MC_UNCONFIRMED 0x0001 - __u32 feat_mask; +struct l2cap_move_chan_cfm_rsp { + __le16 icid; +} __packed; - __u8 info_state; - __u8 info_ident; +/* info type */ +#define L2CAP_IT_CL_MTU 0x0001 +#define L2CAP_IT_FEAT_MASK 0x0002 +#define L2CAP_IT_FIXED_CHAN 0x0003 - struct timer_list info_timer; +/* info result */ +#define L2CAP_IR_SUCCESS 0x0000 +#define L2CAP_IR_NOTSUPP 0x0001 + +struct l2cap_conn_param_update_req { + __le16 min; + __le16 max; + __le16 latency; + __le16 to_multiplier; +} __packed; + +struct l2cap_conn_param_update_rsp { + __le16 result; +} __packed; + +/* Connection Parameters result */ +#define L2CAP_CONN_PARAM_ACCEPTED 0x0000 +#define L2CAP_CONN_PARAM_REJECTED 0x0001 - spinlock_t lock; +#define L2CAP_LE_MAX_CREDITS 10 +#define L2CAP_LE_DEFAULT_MPS 230 - struct sk_buff *rx_skb; - __u32 rx_len; - __u8 rx_ident; - __u8 tx_ident; +struct l2cap_le_conn_req { + __le16 psm; + __le16 scid; + __le16 mtu; + __le16 mps; + __le16 credits; +} __packed; + +struct l2cap_le_conn_rsp { + __le16 dcid; + __le16 mtu; + __le16 mps; + __le16 credits; + __le16 result; +} __packed; - struct l2cap_chan_list chan_list; +struct l2cap_le_credits { + __le16 cid; + __le16 credits; +} __packed; + +/* ----- L2CAP channels and connections ----- */ +struct l2cap_seq_list { + __u16 head; + __u16 tail; + __u16 mask; + __u16 *list; }; -#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 -#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x02 +#define L2CAP_SEQ_LIST_CLEAR 0xFFFF +#define L2CAP_SEQ_LIST_TAIL 0x8000 -/* ----- L2CAP channel and socket info ----- */ -#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) +struct l2cap_chan { + struct l2cap_conn *conn; + struct hci_conn *hs_hcon; + struct hci_chan *hs_hchan; + struct kref kref; -struct l2cap_pinfo { - struct 
bt_sock bt; + __u8 state; + + bdaddr_t dst; + __u8 dst_type; + bdaddr_t src; + __u8 src_type; __le16 psm; + __le16 sport; __u16 dcid; __u16 scid; __u16 imtu; __u16 omtu; __u16 flush_to; + __u8 mode; + __u8 chan_type; + __u8 chan_policy; - __u32 link_mode; + __u8 sec_level; + + __u8 ident; __u8 conf_req[64]; __u8 conf_len; - __u8 conf_state; - __u8 conf_retry; + __u8 num_conf_req; + __u8 num_conf_rsp; + + __u8 fcs; + + __u16 tx_win; + __u16 tx_win_max; + __u16 ack_win; + __u8 max_tx; + __u16 retrans_timeout; + __u16 monitor_timeout; + __u16 mps; + + __u16 tx_credits; + __u16 rx_credits; + + __u8 tx_state; + __u8 rx_state; + + unsigned long conf_state; + unsigned long conn_state; + unsigned long flags; + + __u8 remote_amp_id; + __u8 local_amp_id; + __u8 move_id; + __u8 move_state; + __u8 move_role; + + __u16 next_tx_seq; + __u16 expected_ack_seq; + __u16 expected_tx_seq; + __u16 buffer_seq; + __u16 srej_save_reqseq; + __u16 last_acked_seq; + __u16 frames_sent; + __u16 unacked_frames; + __u8 retry_count; + __u16 sdu_len; + struct sk_buff *sdu; + struct sk_buff *sdu_last_frag; + + __u16 remote_tx_win; + __u8 remote_max_tx; + __u16 remote_mps; + + __u8 local_id; + __u8 local_stype; + __u16 local_msdu; + __u32 local_sdu_itime; + __u32 local_acc_lat; + __u32 local_flush_to; + + __u8 remote_id; + __u8 remote_stype; + __u16 remote_msdu; + __u32 remote_sdu_itime; + __u32 remote_acc_lat; + __u32 remote_flush_to; + + struct delayed_work chan_timer; + struct delayed_work retrans_timer; + struct delayed_work monitor_timer; + struct delayed_work ack_timer; + + struct sk_buff *tx_send_head; + struct sk_buff_head tx_q; + struct sk_buff_head srej_q; + struct l2cap_seq_list srej_list; + struct l2cap_seq_list retrans_list; + + struct list_head list; + struct list_head global_l; + + void *data; + struct l2cap_ops *ops; + struct mutex lock; +}; - __u8 ident; +struct l2cap_ops { + char *name; + + struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan); + int (*recv) (struct l2cap_chan * chan, + struct sk_buff *skb); + void (*teardown) (struct l2cap_chan *chan, int err); + void (*close) (struct l2cap_chan *chan); + void (*state_change) (struct l2cap_chan *chan, + int state, int err); + void (*ready) (struct l2cap_chan *chan); + void (*defer) (struct l2cap_chan *chan); + void (*resume) (struct l2cap_chan *chan); + void (*suspend) (struct l2cap_chan *chan); + void (*set_shutdown) (struct l2cap_chan *chan); + long (*get_sndtimeo) (struct l2cap_chan *chan); + struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, + unsigned long len, int nb); +}; - __le16 sport; +struct l2cap_conn { + struct hci_conn *hcon; + struct hci_chan *hchan; - struct l2cap_conn *conn; - struct sock *next_c; - struct sock *prev_c; + unsigned int mtu; + + __u32 feat_mask; + __u8 fixed_chan_mask; + bool hs_enabled; + + __u8 info_state; + __u8 info_ident; + + struct delayed_work info_timer; + + spinlock_t lock; + + struct sk_buff *rx_skb; + __u32 rx_len; + __u8 tx_ident; + + struct sk_buff_head pending_rx; + struct work_struct pending_rx_work; + + __u8 disc_reason; + + struct delayed_work security_timer; + struct smp_chan *smp_chan; + + struct list_head chan_l; + struct mutex chan_lock; + struct kref ref; + struct list_head users; +}; + +struct l2cap_user { + struct list_head list; + int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user); + void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user); +}; + +#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 +#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04 +#define 
L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08 + +#define L2CAP_CHAN_RAW 1 +#define L2CAP_CHAN_CONN_LESS 2 +#define L2CAP_CHAN_CONN_ORIENTED 3 +#define L2CAP_CHAN_FIXED 4 + +/* ----- L2CAP socket info ----- */ +#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) + +struct l2cap_pinfo { + struct bt_sock bt; + struct l2cap_chan *chan; + struct sk_buff *rx_busy_skb; +}; + +enum { + CONF_REQ_SENT, + CONF_INPUT_DONE, + CONF_OUTPUT_DONE, + CONF_MTU_DONE, + CONF_MODE_DONE, + CONF_CONNECT_PEND, + CONF_RECV_NO_FCS, + CONF_STATE2_DEVICE, + CONF_EWS_RECV, + CONF_LOC_CONF_PEND, + CONF_REM_CONF_PEND, + CONF_NOT_COMPLETE, +}; + +#define L2CAP_CONF_MAX_CONF_REQ 2 +#define L2CAP_CONF_MAX_CONF_RSP 2 + +enum { + CONN_SREJ_SENT, + CONN_WAIT_F, + CONN_SREJ_ACT, + CONN_SEND_PBIT, + CONN_REMOTE_BUSY, + CONN_LOCAL_BUSY, + CONN_REJ_ACT, + CONN_SEND_FBIT, + CONN_RNR_SENT, +}; + +/* Definitions for flags in l2cap_chan */ +enum { + FLAG_ROLE_SWITCH, + FLAG_FORCE_ACTIVE, + FLAG_FORCE_RELIABLE, + FLAG_FLUSHABLE, + FLAG_EXT_CTRL, + FLAG_EFS_ENABLE, + FLAG_DEFER_SETUP, + FLAG_LE_CONN_REQ_SENT, }; -#define L2CAP_CONF_REQ_SENT 0x01 -#define L2CAP_CONF_INPUT_DONE 0x02 -#define L2CAP_CONF_OUTPUT_DONE 0x04 +enum { + L2CAP_TX_STATE_XMIT, + L2CAP_TX_STATE_WAIT_F, +}; + +enum { + L2CAP_RX_STATE_RECV, + L2CAP_RX_STATE_SREJ_SENT, + L2CAP_RX_STATE_MOVE, + L2CAP_RX_STATE_WAIT_P, + L2CAP_RX_STATE_WAIT_F, +}; + +enum { + L2CAP_TXSEQ_EXPECTED, + L2CAP_TXSEQ_EXPECTED_SREJ, + L2CAP_TXSEQ_UNEXPECTED, + L2CAP_TXSEQ_UNEXPECTED_SREJ, + L2CAP_TXSEQ_DUPLICATE, + L2CAP_TXSEQ_DUPLICATE_SREJ, + L2CAP_TXSEQ_INVALID, + L2CAP_TXSEQ_INVALID_IGNORE, +}; -#define L2CAP_CONF_MAX_RETRIES 2 +enum { + L2CAP_EV_DATA_REQUEST, + L2CAP_EV_LOCAL_BUSY_DETECTED, + L2CAP_EV_LOCAL_BUSY_CLEAR, + L2CAP_EV_RECV_REQSEQ_AND_FBIT, + L2CAP_EV_RECV_FBIT, + L2CAP_EV_RETRANS_TO, + L2CAP_EV_MONITOR_TO, + L2CAP_EV_EXPLICIT_POLL, + L2CAP_EV_RECV_IFRAME, + L2CAP_EV_RECV_RR, + L2CAP_EV_RECV_REJ, + L2CAP_EV_RECV_RNR, + L2CAP_EV_RECV_SREJ, + L2CAP_EV_RECV_FRAME, +}; + +enum { + L2CAP_MOVE_ROLE_NONE, + L2CAP_MOVE_ROLE_INITIATOR, + L2CAP_MOVE_ROLE_RESPONDER, +}; + +enum { + L2CAP_MOVE_STABLE, + L2CAP_MOVE_WAIT_REQ, + L2CAP_MOVE_WAIT_RSP, + L2CAP_MOVE_WAIT_RSP_SUCCESS, + L2CAP_MOVE_WAIT_CONFIRM, + L2CAP_MOVE_WAIT_CONFIRM_RSP, + L2CAP_MOVE_WAIT_LOGICAL_COMP, + L2CAP_MOVE_WAIT_LOGICAL_CFM, + L2CAP_MOVE_WAIT_LOCAL_BUSY, + L2CAP_MOVE_WAIT_PREPARE, +}; -void l2cap_load(void); +void l2cap_chan_hold(struct l2cap_chan *c); +void l2cap_chan_put(struct l2cap_chan *c); + +static inline void l2cap_chan_lock(struct l2cap_chan *chan) +{ + mutex_lock(&chan->lock); +} + +static inline void l2cap_chan_unlock(struct l2cap_chan *chan) +{ + mutex_unlock(&chan->lock); +} + +static inline void l2cap_set_timer(struct l2cap_chan *chan, + struct delayed_work *work, long timeout) +{ + BT_DBG("chan %p state %s timeout %ld", chan, + state_to_string(chan->state), timeout); + + /* If delayed work cancelled do not hold(chan) + since it is already done with previous set_timer */ + if (!cancel_delayed_work(work)) + l2cap_chan_hold(chan); + + schedule_delayed_work(work, timeout); +} + +static inline bool l2cap_clear_timer(struct l2cap_chan *chan, + struct delayed_work *work) +{ + bool ret; + + /* put(chan) if delayed work cancelled otherwise it + is done in delayed work function */ + ret = cancel_delayed_work(work); + if (ret) + l2cap_chan_put(chan); + + return ret; +} + +#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) +#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) +#define 
__clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) +#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) +#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ + msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO)); +#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer) + +static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2) +{ + if (seq1 >= seq2) + return seq1 - seq2; + else + return chan->tx_win_max + 1 - seq2 + seq1; +} + +static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq) +{ + return (seq + 1) % (chan->tx_win_max + 1); +} + +static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan) +{ + return NULL; +} + +static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) +{ +} + +static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) +{ +} + +static inline void l2cap_chan_no_defer(struct l2cap_chan *chan) +{ +} + +static inline void l2cap_chan_no_resume(struct l2cap_chan *chan) +{ +} + +static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan) +{ +} + +static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan) +{ + return 0; +} + +extern bool disable_ertm; + +int l2cap_init_sockets(void); +void l2cap_cleanup_sockets(void); +bool l2cap_is_socket(struct socket *sock); + +void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan); +void __l2cap_connect_rsp_defer(struct l2cap_chan *chan); + +int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm); +int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid); + +struct l2cap_chan *l2cap_chan_create(void); +void l2cap_chan_close(struct l2cap_chan *chan, int reason); +int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, + bdaddr_t *dst, u8 dst_type); +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, + u32 priority); +void l2cap_chan_busy(struct l2cap_chan *chan, int busy); +int l2cap_chan_check_security(struct l2cap_chan *chan); +void l2cap_chan_set_defaults(struct l2cap_chan *chan); +int l2cap_ertm_init(struct l2cap_chan *chan); +void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); +void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); +void l2cap_chan_del(struct l2cap_chan *chan, int err); +void l2cap_conn_update_id_addr(struct hci_conn *hcon); +void l2cap_send_conn_req(struct l2cap_chan *chan); +void l2cap_move_start(struct l2cap_chan *chan); +void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, + u8 status); +void __l2cap_physical_cfm(struct l2cap_chan *chan, int result); + +void l2cap_conn_get(struct l2cap_conn *conn); +void l2cap_conn_put(struct l2cap_conn *conn); + +int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user); +void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user); #endif /* __L2CAP_H */ diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h new file mode 100644 index 00000000000..bcffc9ae0c8 --- /dev/null +++ b/include/net/bluetooth/mgmt.h @@ -0,0 +1,580 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + + Copyright (C) 2010 Nokia Corporation + Copyright (C) 2011-2012 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#define MGMT_INDEX_NONE 0xFFFF + +#define MGMT_STATUS_SUCCESS 0x00 +#define MGMT_STATUS_UNKNOWN_COMMAND 0x01 +#define MGMT_STATUS_NOT_CONNECTED 0x02 +#define MGMT_STATUS_FAILED 0x03 +#define MGMT_STATUS_CONNECT_FAILED 0x04 +#define MGMT_STATUS_AUTH_FAILED 0x05 +#define MGMT_STATUS_NOT_PAIRED 0x06 +#define MGMT_STATUS_NO_RESOURCES 0x07 +#define MGMT_STATUS_TIMEOUT 0x08 +#define MGMT_STATUS_ALREADY_CONNECTED 0x09 +#define MGMT_STATUS_BUSY 0x0a +#define MGMT_STATUS_REJECTED 0x0b +#define MGMT_STATUS_NOT_SUPPORTED 0x0c +#define MGMT_STATUS_INVALID_PARAMS 0x0d +#define MGMT_STATUS_DISCONNECTED 0x0e +#define MGMT_STATUS_NOT_POWERED 0x0f +#define MGMT_STATUS_CANCELLED 0x10 +#define MGMT_STATUS_INVALID_INDEX 0x11 +#define MGMT_STATUS_RFKILLED 0x12 + +struct mgmt_hdr { + __le16 opcode; + __le16 index; + __le16 len; +} __packed; + +struct mgmt_addr_info { + bdaddr_t bdaddr; + __u8 type; +} __packed; +#define MGMT_ADDR_INFO_SIZE 7 + +#define MGMT_OP_READ_VERSION 0x0001 +#define MGMT_READ_VERSION_SIZE 0 +struct mgmt_rp_read_version { + __u8 version; + __le16 revision; +} __packed; + +#define MGMT_OP_READ_COMMANDS 0x0002 +#define MGMT_READ_COMMANDS_SIZE 0 +struct mgmt_rp_read_commands { + __le16 num_commands; + __le16 num_events; + __le16 opcodes[0]; +} __packed; + +#define MGMT_OP_READ_INDEX_LIST 0x0003 +#define MGMT_READ_INDEX_LIST_SIZE 0 +struct mgmt_rp_read_index_list { + __le16 num_controllers; + __le16 index[0]; +} __packed; + +/* Reserve one extra byte for names in management messages so that they + * are always guaranteed to be nul-terminated */ +#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1) +#define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1) + +#define MGMT_SETTING_POWERED 0x00000001 +#define MGMT_SETTING_CONNECTABLE 0x00000002 +#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004 +#define MGMT_SETTING_DISCOVERABLE 0x00000008 +#define MGMT_SETTING_PAIRABLE 0x00000010 +#define MGMT_SETTING_LINK_SECURITY 0x00000020 +#define MGMT_SETTING_SSP 0x00000040 +#define MGMT_SETTING_BREDR 0x00000080 +#define MGMT_SETTING_HS 0x00000100 +#define MGMT_SETTING_LE 0x00000200 +#define MGMT_SETTING_ADVERTISING 0x00000400 +#define MGMT_SETTING_SECURE_CONN 0x00000800 +#define MGMT_SETTING_DEBUG_KEYS 0x00001000 +#define MGMT_SETTING_PRIVACY 0x00002000 + +#define MGMT_OP_READ_INFO 0x0004 +#define MGMT_READ_INFO_SIZE 0 +struct mgmt_rp_read_info { + bdaddr_t bdaddr; + __u8 version; + __le16 manufacturer; + __le32 supported_settings; + __le32 current_settings; + __u8 dev_class[3]; + __u8 name[MGMT_MAX_NAME_LENGTH]; + __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; +} __packed; + +struct mgmt_mode { + __u8 val; +} __packed; + +#define MGMT_SETTING_SIZE 1 + +#define MGMT_OP_SET_POWERED 0x0005 + +#define MGMT_OP_SET_DISCOVERABLE 0x0006 +struct mgmt_cp_set_discoverable { + __u8 val; + __le16 timeout; +} __packed; +#define MGMT_SET_DISCOVERABLE_SIZE 3 + +#define 
MGMT_OP_SET_CONNECTABLE 0x0007 + +#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008 + +#define MGMT_OP_SET_PAIRABLE 0x0009 + +#define MGMT_OP_SET_LINK_SECURITY 0x000A + +#define MGMT_OP_SET_SSP 0x000B + +#define MGMT_OP_SET_HS 0x000C + +#define MGMT_OP_SET_LE 0x000D +#define MGMT_OP_SET_DEV_CLASS 0x000E +struct mgmt_cp_set_dev_class { + __u8 major; + __u8 minor; +} __packed; +#define MGMT_SET_DEV_CLASS_SIZE 2 + +#define MGMT_OP_SET_LOCAL_NAME 0x000F +struct mgmt_cp_set_local_name { + __u8 name[MGMT_MAX_NAME_LENGTH]; + __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; +} __packed; +#define MGMT_SET_LOCAL_NAME_SIZE 260 + +#define MGMT_OP_ADD_UUID 0x0010 +struct mgmt_cp_add_uuid { + __u8 uuid[16]; + __u8 svc_hint; +} __packed; +#define MGMT_ADD_UUID_SIZE 17 + +#define MGMT_OP_REMOVE_UUID 0x0011 +struct mgmt_cp_remove_uuid { + __u8 uuid[16]; +} __packed; +#define MGMT_REMOVE_UUID_SIZE 16 + +struct mgmt_link_key_info { + struct mgmt_addr_info addr; + __u8 type; + __u8 val[16]; + __u8 pin_len; +} __packed; + +#define MGMT_OP_LOAD_LINK_KEYS 0x0012 +struct mgmt_cp_load_link_keys { + __u8 debug_keys; + __le16 key_count; + struct mgmt_link_key_info keys[0]; +} __packed; +#define MGMT_LOAD_LINK_KEYS_SIZE 3 + +#define MGMT_LTK_UNAUTHENTICATED 0x00 +#define MGMT_LTK_AUTHENTICATED 0x01 + +struct mgmt_ltk_info { + struct mgmt_addr_info addr; + __u8 type; + __u8 master; + __u8 enc_size; + __le16 ediv; + __le64 rand; + __u8 val[16]; +} __packed; + +#define MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013 +struct mgmt_cp_load_long_term_keys { + __le16 key_count; + struct mgmt_ltk_info keys[0]; +} __packed; +#define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2 + +#define MGMT_OP_DISCONNECT 0x0014 +struct mgmt_cp_disconnect { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_DISCONNECT_SIZE MGMT_ADDR_INFO_SIZE +struct mgmt_rp_disconnect { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_OP_GET_CONNECTIONS 0x0015 +#define MGMT_GET_CONNECTIONS_SIZE 0 +struct mgmt_rp_get_connections { + __le16 conn_count; + struct mgmt_addr_info addr[0]; +} __packed; + +#define MGMT_OP_PIN_CODE_REPLY 0x0016 +struct mgmt_cp_pin_code_reply { + struct mgmt_addr_info addr; + __u8 pin_len; + __u8 pin_code[16]; +} __packed; +#define MGMT_PIN_CODE_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 17) +struct mgmt_rp_pin_code_reply { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017 +struct mgmt_cp_pin_code_neg_reply { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_PIN_CODE_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_SET_IO_CAPABILITY 0x0018 +struct mgmt_cp_set_io_capability { + __u8 io_capability; +} __packed; +#define MGMT_SET_IO_CAPABILITY_SIZE 1 + +#define MGMT_OP_PAIR_DEVICE 0x0019 +struct mgmt_cp_pair_device { + struct mgmt_addr_info addr; + __u8 io_cap; +} __packed; +#define MGMT_PAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1) +struct mgmt_rp_pair_device { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_OP_CANCEL_PAIR_DEVICE 0x001A +#define MGMT_CANCEL_PAIR_DEVICE_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_UNPAIR_DEVICE 0x001B +struct mgmt_cp_unpair_device { + struct mgmt_addr_info addr; + __u8 disconnect; +} __packed; +#define MGMT_UNPAIR_DEVICE_SIZE (MGMT_ADDR_INFO_SIZE + 1) +struct mgmt_rp_unpair_device { + struct mgmt_addr_info addr; +}; + +#define MGMT_OP_USER_CONFIRM_REPLY 0x001C +struct mgmt_cp_user_confirm_reply { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_USER_CONFIRM_REPLY_SIZE MGMT_ADDR_INFO_SIZE +struct mgmt_rp_user_confirm_reply { + struct mgmt_addr_info addr; +} __packed; + 
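The management structures above fix a simple little-endian wire format: every PDU starts with the six-byte mgmt_hdr (opcode, controller index, payload length) followed by the command- or event-specific parameters. As a minimal sketch of that layout only — the userspace struct mirrors, put_le16() helper and mgmt_pack_set_powered() below are illustrative and not part of this header — packing an MGMT_OP_SET_POWERED command with its one-byte mgmt_mode payload could look like:

#include <stdint.h>
#include <stddef.h>

#define MGMT_OP_SET_POWERED	0x0005	/* as defined above */

struct mgmt_hdr {			/* userspace mirror of the header above */
	uint16_t opcode;		/* little endian on the wire */
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

struct mgmt_mode {
	uint8_t val;
} __attribute__((packed));

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

/* Pack a Set Powered command for controller 'index' into buf (>= 7 bytes)
 * and return the total PDU length: 6-byte header + 1-byte mode payload. */
static size_t mgmt_pack_set_powered(uint8_t *buf, uint16_t index, uint8_t on)
{
	put_le16(buf + 0, MGMT_OP_SET_POWERED);
	put_le16(buf + 2, index);
	put_le16(buf + 4, sizeof(struct mgmt_mode));
	buf[sizeof(struct mgmt_hdr)] = on;
	return sizeof(struct mgmt_hdr) + sizeof(struct mgmt_mode);
}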
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001D +struct mgmt_cp_user_confirm_neg_reply { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_USER_CONFIRM_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_USER_PASSKEY_REPLY 0x001E +struct mgmt_cp_user_passkey_reply { + struct mgmt_addr_info addr; + __le32 passkey; +} __packed; +#define MGMT_USER_PASSKEY_REPLY_SIZE (MGMT_ADDR_INFO_SIZE + 4) +struct mgmt_rp_user_passkey_reply { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001F +struct mgmt_cp_user_passkey_neg_reply { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_USER_PASSKEY_NEG_REPLY_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020 +#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0 +struct mgmt_rp_read_local_oob_data { + __u8 hash[16]; + __u8 randomizer[16]; +} __packed; +struct mgmt_rp_read_local_oob_ext_data { + __u8 hash192[16]; + __u8 randomizer192[16]; + __u8 hash256[16]; + __u8 randomizer256[16]; +} __packed; + +#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021 +struct mgmt_cp_add_remote_oob_data { + struct mgmt_addr_info addr; + __u8 hash[16]; + __u8 randomizer[16]; +} __packed; +#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32) +struct mgmt_cp_add_remote_oob_ext_data { + struct mgmt_addr_info addr; + __u8 hash192[16]; + __u8 randomizer192[16]; + __u8 hash256[16]; + __u8 randomizer256[16]; +} __packed; +#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64) + +#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022 +struct mgmt_cp_remove_remote_oob_data { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_REMOVE_REMOTE_OOB_DATA_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_START_DISCOVERY 0x0023 +struct mgmt_cp_start_discovery { + __u8 type; +} __packed; +#define MGMT_START_DISCOVERY_SIZE 1 + +#define MGMT_OP_STOP_DISCOVERY 0x0024 +struct mgmt_cp_stop_discovery { + __u8 type; +} __packed; +#define MGMT_STOP_DISCOVERY_SIZE 1 + +#define MGMT_OP_CONFIRM_NAME 0x0025 +struct mgmt_cp_confirm_name { + struct mgmt_addr_info addr; + __u8 name_known; +} __packed; +#define MGMT_CONFIRM_NAME_SIZE (MGMT_ADDR_INFO_SIZE + 1) +struct mgmt_rp_confirm_name { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_OP_BLOCK_DEVICE 0x0026 +struct mgmt_cp_block_device { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_BLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_UNBLOCK_DEVICE 0x0027 +struct mgmt_cp_unblock_device { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE + +#define MGMT_OP_SET_DEVICE_ID 0x0028 +struct mgmt_cp_set_device_id { + __le16 source; + __le16 vendor; + __le16 product; + __le16 version; +} __packed; +#define MGMT_SET_DEVICE_ID_SIZE 8 + +#define MGMT_OP_SET_ADVERTISING 0x0029 + +#define MGMT_OP_SET_BREDR 0x002A + +#define MGMT_OP_SET_STATIC_ADDRESS 0x002B +struct mgmt_cp_set_static_address { + bdaddr_t bdaddr; +} __packed; +#define MGMT_SET_STATIC_ADDRESS_SIZE 6 + +#define MGMT_OP_SET_SCAN_PARAMS 0x002C +struct mgmt_cp_set_scan_params { + __le16 interval; + __le16 window; +} __packed; +#define MGMT_SET_SCAN_PARAMS_SIZE 4 + +#define MGMT_OP_SET_SECURE_CONN 0x002D + +#define MGMT_OP_SET_DEBUG_KEYS 0x002E + +#define MGMT_OP_SET_PRIVACY 0x002F +struct mgmt_cp_set_privacy { + __u8 privacy; + __u8 irk[16]; +} __packed; +#define MGMT_SET_PRIVACY_SIZE 17 + +struct mgmt_irk_info { + struct mgmt_addr_info addr; + __u8 val[16]; +} __packed; + +#define MGMT_OP_LOAD_IRKS 0x0030 +struct mgmt_cp_load_irks { + __le16 irk_count; + struct 
mgmt_irk_info irks[0]; +} __packed; +#define MGMT_LOAD_IRKS_SIZE 2 + +#define MGMT_OP_GET_CONN_INFO 0x0031 +struct mgmt_cp_get_conn_info { + struct mgmt_addr_info addr; +} __packed; +#define MGMT_GET_CONN_INFO_SIZE MGMT_ADDR_INFO_SIZE +struct mgmt_rp_get_conn_info { + struct mgmt_addr_info addr; + __s8 rssi; + __s8 tx_power; + __s8 max_tx_power; +} __packed; + +#define MGMT_EV_CMD_COMPLETE 0x0001 +struct mgmt_ev_cmd_complete { + __le16 opcode; + __u8 status; + __u8 data[0]; +} __packed; + +#define MGMT_EV_CMD_STATUS 0x0002 +struct mgmt_ev_cmd_status { + __le16 opcode; + __u8 status; +} __packed; + +#define MGMT_EV_CONTROLLER_ERROR 0x0003 +struct mgmt_ev_controller_error { + __u8 error_code; +} __packed; + +#define MGMT_EV_INDEX_ADDED 0x0004 + +#define MGMT_EV_INDEX_REMOVED 0x0005 + +#define MGMT_EV_NEW_SETTINGS 0x0006 + +#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007 +struct mgmt_ev_class_of_dev_changed { + __u8 dev_class[3]; +}; + +#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008 +struct mgmt_ev_local_name_changed { + __u8 name[MGMT_MAX_NAME_LENGTH]; + __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; +} __packed; + +#define MGMT_EV_NEW_LINK_KEY 0x0009 +struct mgmt_ev_new_link_key { + __u8 store_hint; + struct mgmt_link_key_info key; +} __packed; + +#define MGMT_EV_NEW_LONG_TERM_KEY 0x000A +struct mgmt_ev_new_long_term_key { + __u8 store_hint; + struct mgmt_ltk_info key; +} __packed; + +#define MGMT_EV_DEVICE_CONNECTED 0x000B +struct mgmt_ev_device_connected { + struct mgmt_addr_info addr; + __le32 flags; + __le16 eir_len; + __u8 eir[0]; +} __packed; + +#define MGMT_DEV_DISCONN_UNKNOWN 0x00 +#define MGMT_DEV_DISCONN_TIMEOUT 0x01 +#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02 +#define MGMT_DEV_DISCONN_REMOTE 0x03 + +#define MGMT_EV_DEVICE_DISCONNECTED 0x000C +struct mgmt_ev_device_disconnected { + struct mgmt_addr_info addr; + __u8 reason; +} __packed; + +#define MGMT_EV_CONNECT_FAILED 0x000D +struct mgmt_ev_connect_failed { + struct mgmt_addr_info addr; + __u8 status; +} __packed; + +#define MGMT_EV_PIN_CODE_REQUEST 0x000E +struct mgmt_ev_pin_code_request { + struct mgmt_addr_info addr; + __u8 secure; +} __packed; + +#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F +struct mgmt_ev_user_confirm_request { + struct mgmt_addr_info addr; + __u8 confirm_hint; + __le32 value; +} __packed; + +#define MGMT_EV_USER_PASSKEY_REQUEST 0x0010 +struct mgmt_ev_user_passkey_request { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_EV_AUTH_FAILED 0x0011 +struct mgmt_ev_auth_failed { + struct mgmt_addr_info addr; + __u8 status; +} __packed; + +#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01 +#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02 + +#define MGMT_EV_DEVICE_FOUND 0x0012 +struct mgmt_ev_device_found { + struct mgmt_addr_info addr; + __s8 rssi; + __le32 flags; + __le16 eir_len; + __u8 eir[0]; +} __packed; + +#define MGMT_EV_DISCOVERING 0x0013 +struct mgmt_ev_discovering { + __u8 type; + __u8 discovering; +} __packed; + +#define MGMT_EV_DEVICE_BLOCKED 0x0014 +struct mgmt_ev_device_blocked { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_EV_DEVICE_UNBLOCKED 0x0015 +struct mgmt_ev_device_unblocked { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_EV_DEVICE_UNPAIRED 0x0016 +struct mgmt_ev_device_unpaired { + struct mgmt_addr_info addr; +} __packed; + +#define MGMT_EV_PASSKEY_NOTIFY 0x0017 +struct mgmt_ev_passkey_notify { + struct mgmt_addr_info addr; + __le32 passkey; + __u8 entered; +} __packed; + +#define MGMT_EV_NEW_IRK 0x0018 +struct mgmt_ev_new_irk { + __u8 store_hint; + bdaddr_t rpa; + struct 
mgmt_irk_info irk; +} __packed; + +struct mgmt_csrk_info { + struct mgmt_addr_info addr; + __u8 master; + __u8 val[16]; +} __packed; + +#define MGMT_EV_NEW_CSRK 0x0019 +struct mgmt_ev_new_csrk { + __u8 store_hint; + struct mgmt_csrk_info key; +} __packed; diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 98ec7a32068..578b83127af 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -1,5 +1,5 @@ -/* - RFCOMM implementation for Linux Bluetooth stack (BlueZ). +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ) Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> @@ -11,13 +11,13 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ @@ -29,6 +29,7 @@ #define RFCOMM_CONN_TIMEOUT (HZ * 30) #define RFCOMM_DISC_TIMEOUT (HZ * 20) #define RFCOMM_AUTH_TIMEOUT (HZ * 25) +#define RFCOMM_IDLE_TIMEOUT (HZ * 2) #define RFCOMM_DEFAULT_MTU 127 #define RFCOMM_DEFAULT_CREDITS 7 @@ -104,20 +105,20 @@ struct rfcomm_hdr { u8 addr; u8 ctrl; - u8 len; // Actual size can be 2 bytes -} __attribute__ ((packed)); + u8 len; /* Actual size can be 2 bytes */ +} __packed; struct rfcomm_cmd { u8 addr; u8 ctrl; u8 len; u8 fcs; -} __attribute__ ((packed)); +} __packed; struct rfcomm_mcc { u8 type; u8 len; -} __attribute__ ((packed)); +} __packed; struct rfcomm_pn { u8 dlci; @@ -127,7 +128,7 @@ struct rfcomm_pn { __le16 mtu; u8 max_retrans; u8 credits; -} __attribute__ ((packed)); +} __packed; struct rfcomm_rpn { u8 dlci; @@ -137,26 +138,26 @@ struct rfcomm_rpn { u8 xon_char; u8 xoff_char; __le16 param_mask; -} __attribute__ ((packed)); +} __packed; struct rfcomm_rls { u8 dlci; u8 status; -} __attribute__ ((packed)); +} __packed; struct rfcomm_msc { u8 dlci; u8 v24_sig; -} __attribute__ ((packed)); +} __packed; /* ---- Core structures, flags etc ---- */ struct rfcomm_session { struct list_head list; struct socket *sock; + struct timer_list timer; unsigned long state; unsigned long flags; - atomic_t refcnt; int initiator; /* Default DLC parameters */ @@ -172,7 +173,7 @@ struct rfcomm_dlc { struct sk_buff_head tx_queue; struct timer_list timer; - spinlock_t lock; + struct mutex lock; unsigned long state; unsigned long flags; atomic_t refcnt; @@ -180,9 +181,12 @@ struct rfcomm_dlc { u8 addr; u8 priority; u8 v24_sig; + u8 remote_v24_sig; u8 mscex; - - u32 link_mode; + u8 out; + u8 sec_level; + u8 role_switch; + u32 defer_setup; uint mtu; uint cfc; @@ -200,17 +204,15 @@ struct rfcomm_dlc { #define RFCOMM_RX_THROTTLED 0 #define 
RFCOMM_TX_THROTTLED 1 #define RFCOMM_TIMED_OUT 2 -#define RFCOMM_MSC_PENDING 3 -#define RFCOMM_AUTH_PENDING 4 -#define RFCOMM_AUTH_ACCEPT 5 -#define RFCOMM_AUTH_REJECT 6 +#define RFCOMM_MSC_PENDING 3 +#define RFCOMM_SEC_PENDING 4 +#define RFCOMM_AUTH_PENDING 5 +#define RFCOMM_AUTH_ACCEPT 6 +#define RFCOMM_AUTH_REJECT 7 +#define RFCOMM_DEFER_SETUP 8 +#define RFCOMM_ENC_DROP 9 /* Scheduling flags and events */ -#define RFCOMM_SCHED_STATE 0 -#define RFCOMM_SCHED_RX 1 -#define RFCOMM_SCHED_TX 2 -#define RFCOMM_SCHED_TIMEO 3 -#define RFCOMM_SCHED_AUTH 4 #define RFCOMM_SCHED_WAKEUP 31 /* MSC exchange flags */ @@ -226,20 +228,24 @@ struct rfcomm_dlc { /* ---- RFCOMM SEND RPN ---- */ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, u8 bit_rate, u8 data_bits, u8 stop_bits, - u8 parity, u8 flow_ctrl_settings, + u8 parity, u8 flow_ctrl_settings, u8 xon_char, u8 xoff_char, u16 param_mask); /* ---- RFCOMM DLCs (channels) ---- */ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio); void rfcomm_dlc_free(struct rfcomm_dlc *d); -int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); +int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, + u8 channel); int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason); int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb); +void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb); int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig); int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig); +void rfcomm_dlc_accept(struct rfcomm_dlc *d); +struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel); -#define rfcomm_dlc_lock(d) spin_lock(&d->lock) -#define rfcomm_dlc_unlock(d) spin_unlock(&d->lock) +#define rfcomm_dlc_lock(d) mutex_lock(&d->lock) +#define rfcomm_dlc_unlock(d) mutex_unlock(&d->lock) static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d) { @@ -252,8 +258,8 @@ static inline void rfcomm_dlc_put(struct rfcomm_dlc *d) rfcomm_dlc_free(d); } -extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d); -extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d); +void __rfcomm_dlc_throttle(struct rfcomm_dlc *d); +void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d); static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d) { @@ -268,12 +274,8 @@ static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) } /* ---- RFCOMM sessions ---- */ -void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst); - -static inline void rfcomm_session_hold(struct rfcomm_session *s) -{ - atomic_inc(&s->refcnt); -} +void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, + bdaddr_t *dst); /* ---- RFCOMM sockets ---- */ struct sockaddr_rc { @@ -295,20 +297,25 @@ struct rfcomm_conninfo { #define RFCOMM_LM_TRUSTED 0x0008 #define RFCOMM_LM_RELIABLE 0x0010 #define RFCOMM_LM_SECURE 0x0020 +#define RFCOMM_LM_FIPS 0x0040 #define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk) struct rfcomm_pinfo { struct bt_sock bt; + bdaddr_t src; + bdaddr_t dst; struct rfcomm_dlc *dlc; u8 channel; - u32 link_mode; + u8 sec_level; + u8 role_switch; }; int rfcomm_init_sockets(void); void rfcomm_cleanup_sockets(void); -int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d); +int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, + struct rfcomm_dlc **d); /* ---- RFCOMM TTY ---- */ #define RFCOMM_MAX_DEV 256 @@ -319,11 +326,16 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc #define 
RFCOMMGETDEVINFO _IOR('R', 211, int) #define RFCOMMSTEALDLC _IOW('R', 220, int) +/* rfcomm_dev.flags bit definitions */ #define RFCOMM_REUSE_DLC 0 #define RFCOMM_RELEASE_ONHUP 1 #define RFCOMM_HANGUP_NOW 2 #define RFCOMM_TTY_ATTACHED 3 -#define RFCOMM_TTY_RELEASED 4 +#define RFCOMM_DEFUNCT_BIT4 4 /* don't reuse this bit - userspace visible */ + +/* rfcomm_dev.status bit definitions */ +#define RFCOMM_DEV_RELEASED 0 +#define RFCOMM_TTY_OWNED 1 struct rfcomm_dev_req { s16 dev_id; @@ -331,7 +343,6 @@ struct rfcomm_dev_req { bdaddr_t src; bdaddr_t dst; u8 channel; - }; struct rfcomm_dev_info { @@ -349,7 +360,17 @@ struct rfcomm_dev_list_req { }; int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); + +#ifdef CONFIG_BT_RFCOMM_TTY int rfcomm_init_ttys(void); void rfcomm_cleanup_ttys(void); - +#else +static inline int rfcomm_init_ttys(void) +{ + return 0; +} +static inline void rfcomm_cleanup_ttys(void) +{ +} +#endif #endif /* __RFCOMM_H */ diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index e28a2a77147..2019d1a0996 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -1,4 +1,4 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated @@ -12,13 +12,13 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ @@ -55,11 +55,8 @@ struct sco_conninfo { struct sco_conn { struct hci_conn *hcon; - bdaddr_t *dst; - bdaddr_t *src; - spinlock_t lock; - struct sock *sk; + struct sock *sk; unsigned int mtu; }; @@ -72,7 +69,10 @@ struct sco_conn { struct sco_pinfo { struct bt_sock bt; + bdaddr_t src; + bdaddr_t dst; __u32 flags; + __u16 setting; struct sco_conn *conn; }; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h new file mode 100644 index 00000000000..1d67fb6b23a --- /dev/null +++ b/include/net/busy_poll.h @@ -0,0 +1,169 @@ +/* + * net busy poll support + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Author: Eliezer Tamir + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + */ + +#ifndef _LINUX_NET_BUSY_POLL_H +#define _LINUX_NET_BUSY_POLL_H + +#include <linux/netdevice.h> +#include <net/ip.h> + +#ifdef CONFIG_NET_RX_BUSY_POLL + +struct napi_struct; +extern unsigned int sysctl_net_busy_read __read_mostly; +extern unsigned int sysctl_net_busy_poll __read_mostly; + +/* return values from ndo_ll_poll */ +#define LL_FLUSH_FAILED -1 +#define LL_FLUSH_BUSY -2 + +static inline bool net_busy_loop_on(void) +{ + return sysctl_net_busy_poll; +} + +static inline u64 busy_loop_us_clock(void) +{ + return local_clock() >> 10; +} + +static inline unsigned long sk_busy_loop_end_time(struct sock *sk) +{ + return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); +} + +/* in poll/select we use the global sysctl_net_ll_poll value */ +static inline unsigned long busy_loop_end_time(void) +{ + return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll); +} + +static inline bool sk_can_busy_loop(struct sock *sk) +{ + return sk->sk_ll_usec && sk->sk_napi_id && + !need_resched() && !signal_pending(current); +} + + +static inline bool busy_loop_timeout(unsigned long end_time) +{ + unsigned long now = busy_loop_us_clock(); + + return time_after(now, end_time); +} + +/* when used in sock_poll() nonblock is known at compile time to be true + * so the loop and end_time will be optimized out + */ +static inline bool sk_busy_loop(struct sock *sk, int nonblock) +{ + unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; + const struct net_device_ops *ops; + struct napi_struct *napi; + int rc = false; + + /* + * rcu read lock for napi hash + * bh so we don't race with net_rx_action + */ + rcu_read_lock_bh(); + + napi = napi_by_id(sk->sk_napi_id); + if (!napi) + goto out; + + ops = napi->dev->netdev_ops; + if (!ops->ndo_busy_poll) + goto out; + + do { + rc = ops->ndo_busy_poll(napi); + + if (rc == LL_FLUSH_FAILED) + break; /* permanent failure */ + + if (rc > 0) + /* local bh are disabled so it is ok to use _BH */ + NET_ADD_STATS_BH(sock_net(sk), + LINUX_MIB_BUSYPOLLRXPACKETS, rc); + cpu_relax(); + + } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && + !need_resched() && !busy_loop_timeout(end_time)); + + rc = !skb_queue_empty(&sk->sk_receive_queue); +out: + rcu_read_unlock_bh(); + return rc; +} + +/* used in the NIC receive handler to mark the skb */ +static inline void skb_mark_napi_id(struct sk_buff *skb, + struct napi_struct *napi) +{ + skb->napi_id = napi->napi_id; +} + +/* used in the protocol hanlder to propagate the napi_id to the socket */ +static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) +{ + sk->sk_napi_id = skb->napi_id; +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ +static inline unsigned long net_busy_loop_on(void) +{ + return 0; +} + +static inline unsigned long busy_loop_end_time(void) +{ + return 0; +} + +static inline bool sk_can_busy_loop(struct sock *sk) +{ + return false; +} + +static inline void skb_mark_napi_id(struct sk_buff *skb, + struct napi_struct *napi) +{ +} + +static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) +{ +} + +static inline bool busy_loop_timeout(unsigned long end_time) +{ + return true; +} + +static inline bool sk_busy_loop(struct sock *sk, int 
nonblock) +{ + return false; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#endif /* _LINUX_NET_BUSY_POLL_H */ diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h new file mode 100644 index 00000000000..028b754ae9b --- /dev/null +++ b/include/net/caif/caif_dev.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_DEV_H_ +#define CAIF_DEV_H_ + +#include <net/caif/caif_layer.h> +#include <net/caif/cfcnfg.h> +#include <net/caif/caif_device.h> +#include <linux/caif/caif_socket.h> +#include <linux/if.h> +#include <linux/net.h> + +/** + * struct caif_param - CAIF parameters. + * @size: Length of data + * @data: Binary Data Blob + */ +struct caif_param { + u16 size; + u8 data[256]; +}; + +/** + * struct caif_connect_request - Request data for CAIF channel setup. + * @protocol: Type of CAIF protocol to use (at, datagram etc) + * @sockaddr: Socket address to connect. + * @priority: Priority of the connection. + * @link_selector: Link selector (high bandwidth or low latency) + * @ifindex: kernel index of the interface. + * @param: Connect Request parameters (CAIF_SO_REQ_PARAM). + * + * This struct is used when connecting a CAIF channel. + * It contains all CAIF channel configuration options. + */ +struct caif_connect_request { + enum caif_protocol_type protocol; + struct sockaddr_caif sockaddr; + enum caif_channel_priority priority; + enum caif_link_selector link_selector; + int ifindex; + struct caif_param param; +}; + +/** + * caif_connect_client - Connect a client to CAIF Core Stack. + * @config: Channel setup parameters, specifying what address + * to connect on the Modem. + * @client_layer: User implementation of client layer. This layer + * MUST have receive and control callback functions + * implemented. + * @ifindex: Link layer interface index used for this connection. + * @headroom: Head room needed by CAIF protocol. + * @tailroom: Tail room needed by CAIF protocol. + * + * This function connects a CAIF channel. The Client must implement + * the struct cflayer. This layer represents the Client layer and holds + * receive functions and control callback functions. Control callback + * function will receive information about connect/disconnect responses, + * flow control etc (see enum caif_control). + * E.g. CAIF Socket will call this function for each socket it connects + * and have one client_layer instance for each socket. + */ +int caif_connect_client(struct net *net, + struct caif_connect_request *conn_req, + struct cflayer *client_layer, int *ifindex, + int *headroom, int *tailroom); + +/** + * caif_disconnect_client - Disconnects a client from the CAIF stack. + * + * @client_layer: Client layer to be disconnected. + */ +int caif_disconnect_client(struct net *net, struct cflayer *client_layer); + + +/** + * caif_client_register_refcnt - register ref-count functions provided by client. + * + * @adapt_layer: Client layer using CAIF Stack. + * @hold: Function provided by client layer increasing ref-count + * @put: Function provided by client layer decreasing ref-count + * + * Client of the CAIF Stack must register functions for reference counting. + * These functions are called by the CAIF Stack for every upstream packet, + * and must therefore be implemented efficiently. + * + * Client should call caif_free_client when reference count degrease to zero. 
+ */ + +void caif_client_register_refcnt(struct cflayer *adapt_layer, + void (*hold)(struct cflayer *lyr), + void (*put)(struct cflayer *lyr)); +/** + * caif_free_client - Free memory used to manage the client in the CAIF Stack. + * + * @client_layer: Client layer to be removed. + * + * This function must be called from client layer in order to free memory. + * Caller must guarantee that no packets are in flight upstream when calling + * this function. + */ +void caif_free_client(struct cflayer *adap_layer); + +/** + * struct caif_enroll_dev - Enroll a net-device as a CAIF Link layer + * @dev: Network device to enroll. + * @caifdev: Configuration information from CAIF Link Layer + * @link_support: Link layer support layer + * @head_room: Head room needed by link support layer + * @layer: Lowest layer in CAIF stack + * @rcv_fun: Receive function for CAIF stack. + * + * This function enroll a CAIF link layer into CAIF Stack and + * expects the interface to be able to handle CAIF payload. + * The link_support layer is used to add any Link Layer specific + * framing. + */ +void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, int (**rcv_func)( + struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *)); + +#endif /* CAIF_DEV_H_ */ diff --git a/include/net/caif/caif_device.h b/include/net/caif/caif_device.h new file mode 100644 index 00000000000..d6e3c4267c8 --- /dev/null +++ b/include/net/caif/caif_device.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_DEVICE_H_ +#define CAIF_DEVICE_H_ +#include <linux/kernel.h> +#include <linux/net.h> +#include <linux/netdevice.h> +#include <linux/caif/caif_socket.h> +#include <net/caif/caif_device.h> + +/** + * struct caif_dev_common - data shared between CAIF drivers and stack. + * @flowctrl: Flow Control callback function. This function is + * supplied by CAIF Core Stack and is used by CAIF + * Link Layer to send flow-stop to CAIF Core. + * The flow information will be distributed to all + * clients of CAIF. + * + * @link_select: Profile of device, either high-bandwidth or + * low-latency. This member is set by CAIF Link + * Layer Device in order to indicate if this device + * is a high bandwidth or low latency device. + * + * @use_frag: CAIF Frames may be fragmented. + * Is set by CAIF Link Layer in order to indicate if the + * interface receives fragmented frames that must be + * assembled by CAIF Core Layer. + * + * @use_fcs: Indicate if Frame CheckSum (fcs) is used. + * Is set if the physical interface is + * using Frame Checksum on the CAIF Frames. + * + * @use_stx: Indicate STart of frame eXtension (stx) in use. + * Is set if the CAIF Link Layer expects + * CAIF Frames to start with the STX byte. + * + * This structure is shared between the CAIF drivers and the CAIF stack. + * It is used by the device to register its behavior. + * CAIF Core layer must set the member flowctrl in order to supply + * CAIF Link Layer with the flow control function. 
+ * + */ + struct caif_dev_common { + void (*flowctrl)(struct net_device *net, int on); + enum caif_link_selector link_select; + int use_frag; + int use_fcs; + int use_stx; +}; + +#endif /* CAIF_DEVICE_H_ */ diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h new file mode 100644 index 00000000000..097f69cfaa7 --- /dev/null +++ b/include/net/caif/caif_hsi.h @@ -0,0 +1,200 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Daniel Martensson / daniel.martensson@stericsson.com + * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_HSI_H_ +#define CAIF_HSI_H_ + +#include <net/caif/caif_layer.h> +#include <net/caif/caif_device.h> +#include <linux/atomic.h> + +/* + * Maximum number of CAIF frames that can reside in the same HSI frame. + */ +#define CFHSI_MAX_PKTS 15 + +/* + * Maximum number of bytes used for the frame that can be embedded in the + * HSI descriptor. + */ +#define CFHSI_MAX_EMB_FRM_SZ 96 + +/* + * Decides if HSI buffers should be prefilled with 0xFF pattern for easier + * debugging. Both TX and RX buffers will be filled before the transfer. + */ +#define CFHSI_DBG_PREFILL 0 + +/* Structure describing a HSI packet descriptor. */ +#pragma pack(1) /* Byte alignment. */ +struct cfhsi_desc { + u8 header; + u8 offset; + u16 cffrm_len[CFHSI_MAX_PKTS]; + u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ]; +}; +#pragma pack() /* Default alignment. */ + +/* Size of the complete HSI packet descriptor. */ +#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc)) + +/* + * Size of the complete HSI packet descriptor excluding the optional embedded + * CAIF frame. + */ +#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ) + +/* + * Maximum bytes transferred in one transfer. + */ +#define CFHSI_MAX_CAIF_FRAME_SZ 4096 + +#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ) + +/* Size of the complete HSI TX buffer. */ +#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ) + +/* Size of the complete HSI RX buffer. */ +#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ) + +/* Bitmasks for the HSI descriptor. */ +#define CFHSI_PIGGY_DESC (0x01 << 7) + +#define CFHSI_TX_STATE_IDLE 0 +#define CFHSI_TX_STATE_XFER 1 + +#define CFHSI_RX_STATE_DESC 0 +#define CFHSI_RX_STATE_PAYLOAD 1 + +/* Bitmasks for power management. */ +#define CFHSI_WAKE_UP 0 +#define CFHSI_WAKE_UP_ACK 1 +#define CFHSI_WAKE_DOWN_ACK 2 +#define CFHSI_AWAKE 3 +#define CFHSI_WAKELOCK_HELD 4 +#define CFHSI_SHUTDOWN 5 +#define CFHSI_FLUSH_FIFO 6 + +#ifndef CFHSI_INACTIVITY_TOUT +#define CFHSI_INACTIVITY_TOUT (1 * HZ) +#endif /* CFHSI_INACTIVITY_TOUT */ + +#ifndef CFHSI_WAKE_TOUT +#define CFHSI_WAKE_TOUT (3 * HZ) +#endif /* CFHSI_WAKE_TOUT */ + +#ifndef CFHSI_MAX_RX_RETRIES +#define CFHSI_MAX_RX_RETRIES (10 * HZ) +#endif + +/* Structure implemented by the CAIF HSI driver. */ +struct cfhsi_cb_ops { + void (*tx_done_cb) (struct cfhsi_cb_ops *drv); + void (*rx_done_cb) (struct cfhsi_cb_ops *drv); + void (*wake_up_cb) (struct cfhsi_cb_ops *drv); + void (*wake_down_cb) (struct cfhsi_cb_ops *drv); +}; + +/* Structure implemented by HSI device. 
*/ +struct cfhsi_ops { + int (*cfhsi_up) (struct cfhsi_ops *dev); + int (*cfhsi_down) (struct cfhsi_ops *dev); + int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev); + int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev); + int (*cfhsi_wake_up) (struct cfhsi_ops *dev); + int (*cfhsi_wake_down) (struct cfhsi_ops *dev); + int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status); + int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy); + int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev); + struct cfhsi_cb_ops *cb_ops; +}; + +/* Structure holds status of received CAIF frames processing */ +struct cfhsi_rx_state { + int state; + int nfrms; + int pld_len; + int retries; + bool piggy_desc; +}; + +/* Priority mapping */ +enum { + CFHSI_PRIO_CTL = 0, + CFHSI_PRIO_VI, + CFHSI_PRIO_VO, + CFHSI_PRIO_BEBK, + CFHSI_PRIO_LAST, +}; + +struct cfhsi_config { + u32 inactivity_timeout; + u32 aggregation_timeout; + u32 head_align; + u32 tail_align; + u32 q_high_mark; + u32 q_low_mark; +}; + +/* Structure implemented by CAIF HSI drivers. */ +struct cfhsi { + struct caif_dev_common cfdev; + struct net_device *ndev; + struct platform_device *pdev; + struct sk_buff_head qhead[CFHSI_PRIO_LAST]; + struct cfhsi_cb_ops cb_ops; + struct cfhsi_ops *ops; + int tx_state; + struct cfhsi_rx_state rx_state; + struct cfhsi_config cfg; + int rx_len; + u8 *rx_ptr; + u8 *tx_buf; + u8 *rx_buf; + u8 *rx_flip_buf; + spinlock_t lock; + int flow_off_sent; + struct list_head list; + struct work_struct wake_up_work; + struct work_struct wake_down_work; + struct work_struct out_of_sync_work; + struct workqueue_struct *wq; + wait_queue_head_t wake_up_wait; + wait_queue_head_t wake_down_wait; + wait_queue_head_t flush_fifo_wait; + struct timer_list inactivity_timer; + struct timer_list rx_slowpath_timer; + + /* TX aggregation */ + int aggregation_len; + struct timer_list aggregation_timer; + + unsigned long bits; +}; +extern struct platform_driver cfhsi_driver; + +/** + * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters. + * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before + * taking the HSI wakeline down, in milliseconds. + * When using RT Netlink to create, destroy or configure a CAIF HSI interface, + * enum ifla_caif_hsi is used to specify the configuration attributes. + */ +enum ifla_caif_hsi { + __IFLA_CAIF_HSI_UNSPEC, + __IFLA_CAIF_HSI_INACTIVITY_TOUT, + __IFLA_CAIF_HSI_AGGREGATION_TOUT, + __IFLA_CAIF_HSI_HEAD_ALIGN, + __IFLA_CAIF_HSI_TAIL_ALIGN, + __IFLA_CAIF_HSI_QHIGH_WATERMARK, + __IFLA_CAIF_HSI_QLOW_WATERMARK, + __IFLA_CAIF_HSI_MAX +}; + +struct cfhsi_ops *cfhsi_get_ops(void); + +#endif /* CAIF_HSI_H_ */ diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h new file mode 100644 index 00000000000..94e5ed64dc6 --- /dev/null +++ b/include/net/caif/caif_layer.h @@ -0,0 +1,279 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_LAYER_H_ +#define CAIF_LAYER_H_ + +#include <linux/list.h> + +struct cflayer; +struct cfpkt; +struct cfpktq; +struct caif_payload_info; +struct caif_packet_funcs; + +#define CAIF_LAYER_NAME_SZ 16 + +/** + * caif_assert() - Assert function for CAIF. + * @assert: expression to evaluate. + * + * This function will print a error message and a do WARN_ON if the + * assertion failes. Normally this will do a stack up at the current location. 
+ */ +#define caif_assert(assert) \ +do { \ + if (!(assert)) { \ + pr_err("caif:Assert detected:'%s'\n", #assert); \ + WARN_ON(!(assert)); \ + } \ +} while (0) + +/** + * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd(). + * + * @CAIF_CTRLCMD_FLOW_OFF_IND: Flow Control is OFF, transmit function + * should stop sending data + * + * @CAIF_CTRLCMD_FLOW_ON_IND: Flow Control is ON, transmit function + * can start sending data + * + * @CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: Remote end modem has decided to close + * down channel + * + * @CAIF_CTRLCMD_INIT_RSP: Called initially when the layer below + * has finished initialization + * + * @CAIF_CTRLCMD_DEINIT_RSP: Called when de-initialization is + * complete + * + * @CAIF_CTRLCMD_INIT_FAIL_RSP: Called if initialization fails + * + * @_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: CAIF Link layer temporarily cannot + * send more packets. + * @_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND: Called if CAIF Link layer is able + * to send packets again. + * @_CAIF_CTRLCMD_PHYIF_DOWN_IND: Called if CAIF Link layer is going + * down. + * + * These commands are sent upwards in the CAIF stack to the CAIF Client. + * They are used for signaling originating from the modem or CAIF Link Layer. + * These are either responses (*_RSP) or events (*_IND). + */ +enum caif_ctrlcmd { + CAIF_CTRLCMD_FLOW_OFF_IND, + CAIF_CTRLCMD_FLOW_ON_IND, + CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, + CAIF_CTRLCMD_INIT_RSP, + CAIF_CTRLCMD_DEINIT_RSP, + CAIF_CTRLCMD_INIT_FAIL_RSP, + _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, + _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND, + _CAIF_CTRLCMD_PHYIF_DOWN_IND, +}; + +/** + * enum caif_modemcmd - Modem Control Signaling, sent from CAIF Client + * to the CAIF Link Layer or modem. + * + * @CAIF_MODEMCMD_FLOW_ON_REQ: Flow Control is ON, transmit function + * can start sending data. + * + * @CAIF_MODEMCMD_FLOW_OFF_REQ: Flow Control is OFF, transmit function + * should stop sending data. + * + * @_CAIF_MODEMCMD_PHYIF_USEFULL: Notify physical layer that it is in use + * + * @_CAIF_MODEMCMD_PHYIF_USELESS: Notify physical layer that it is + * no longer in use. + * + * These are requests sent 'downwards' in the stack. + * Flow ON, OFF can be indicated to the modem. + */ +enum caif_modemcmd { + CAIF_MODEMCMD_FLOW_ON_REQ = 0, + CAIF_MODEMCMD_FLOW_OFF_REQ = 1, + _CAIF_MODEMCMD_PHYIF_USEFULL = 3, + _CAIF_MODEMCMD_PHYIF_USELESS = 4 +}; + +/** + * enum caif_direction - CAIF Packet Direction. + * Indicate if a packet is to be sent out or to be received in. + * @CAIF_DIR_IN: Incoming packet received. + * @CAIF_DIR_OUT: Outgoing packet to be transmitted. + */ +enum caif_direction { + CAIF_DIR_IN = 0, + CAIF_DIR_OUT = 1 +}; + +/** + * struct cflayer - CAIF Stack layer. + * Defines the framework for the CAIF Core Stack. + * @up: Pointer up to the layer above. + * @dn: Pointer down to the layer below. + * @node: List node used when layer participate in a list. + * @receive: Packet receive function. + * @transmit: Packet transmit funciton. + * @ctrlcmd: Used for control signalling upwards in the stack. + * @modemcmd: Used for control signaling downwards in the stack. + * @id: The identity of this layer + * @name: Name of the layer. + * + * This structure defines the layered structure in CAIF. + * + * It defines CAIF layering structure, used by all CAIF Layers and the + * layers interfacing CAIF. + * + * In order to integrate with CAIF an adaptation layer on top of the CAIF stack + * and PHY layer below the CAIF stack + * must be implemented. These layer must follow the design principles below. 
+ * + * Principles for layering of protocol layers: + * - All layers must use this structure. If embedding it, then place this + * structure first in the layer specific structure. + * + * - Each layer should not depend on any others layer's private data. + * + * - In order to send data upwards do + * layer->up->receive(layer->up, packet); + * + * - In order to send data downwards do + * layer->dn->transmit(layer->dn, info, packet); + */ +struct cflayer { + struct cflayer *up; + struct cflayer *dn; + struct list_head node; + + /* + * receive() - Receive Function (non-blocking). + * Contract: Each layer must implement a receive function passing the + * CAIF packets upwards in the stack. + * Packet handling rules: + * - The CAIF packet (cfpkt) ownership is passed to the + * called receive function. This means that the the + * packet cannot be accessed after passing it to the + * above layer using up->receive(). + * + * - If parsing of the packet fails, the packet must be + * destroyed and negative error code returned + * from the function. + * EXCEPTION: If the framing layer (cffrml) returns + * -EILSEQ, the packet is not freed. + * + * - If parsing succeeds (and above layers return OK) then + * the function must return a value >= 0. + * + * Returns result < 0 indicates an error, 0 or positive value + * indicates success. + * + * @layr: Pointer to the current layer the receive function is + * implemented for (this pointer). + * @cfpkt: Pointer to CaifPacket to be handled. + */ + int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt); + + /* + * transmit() - Transmit Function (non-blocking). + * Contract: Each layer must implement a transmit function passing the + * CAIF packet downwards in the stack. + * Packet handling rules: + * - The CAIF packet (cfpkt) ownership is passed to the + * transmit function. This means that the the packet + * cannot be accessed after passing it to the below + * layer using dn->transmit(). + * + * - Upon error the packet ownership is still passed on, + * so the packet shall be freed where error is detected. + * Callers of the transmit function shall not free packets, + * but errors shall be returned. + * + * - Return value less than zero means error, zero or + * greater than zero means OK. + * + * Returns result < 0 indicates an error, 0 or positive value + * indicates success. + * + * @layr: Pointer to the current layer the receive function + * isimplemented for (this pointer). + * @cfpkt: Pointer to CaifPacket to be handled. + */ + int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt); + + /* + * cttrlcmd() - Control Function upwards in CAIF Stack (non-blocking). + * Used for signaling responses (CAIF_CTRLCMD_*_RSP) + * and asynchronous events from the modem (CAIF_CTRLCMD_*_IND) + * + * @layr: Pointer to the current layer the receive function + * is implemented for (this pointer). + * @ctrl: Control Command. + */ + void (*ctrlcmd) (struct cflayer *layr, enum caif_ctrlcmd ctrl, + int phyid); + + /* + * modemctrl() - Control Function used for controlling the modem. + * Used to signal down-wards in the CAIF stack. + * Returns 0 on success, < 0 upon failure. + * + * @layr: Pointer to the current layer the receive function + * is implemented for (this pointer). + * @ctrl: Control Command. + */ + int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl); + + unsigned int id; + char name[CAIF_LAYER_NAME_SZ]; +}; + +/** + * layer_set_up() - Set the up pointer for a specified layer. + * @layr: Layer where up pointer shall be set. + * @above: Layer above. 
+ */ +#define layer_set_up(layr, above) ((layr)->up = (struct cflayer *)(above)) + +/** + * layer_set_dn() - Set the down pointer for a specified layer. + * @layr: Layer where down pointer shall be set. + * @below: Layer below. + */ +#define layer_set_dn(layr, below) ((layr)->dn = (struct cflayer *)(below)) + +/** + * struct dev_info - Physical Device info information about physical layer. + * @dev: Pointer to native physical device. + * @id: Physical ID of the physical connection used by the + * logical CAIF connection. Used by service layers to + * identify their physical id to Caif MUX (CFMUXL)so + * that the MUX can add the correct physical ID to the + * packet. + */ +struct dev_info { + void *dev; + unsigned int id; +}; + +/** + * struct caif_payload_info - Payload information embedded in packet (sk_buff). + * + * @dev_info: Information about the receiving device. + * + * @hdr_len: Header length, used to align pay load on 32bit boundary. + * + * @channel_id: Channel ID of the logical CAIF connection. + * Used by mux to insert channel id into the caif packet. + */ +struct caif_payload_info { + struct dev_info *dev_info; + unsigned short hdr_len; + unsigned short channel_id; +}; + +#endif /* CAIF_LAYER_H_ */ diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h new file mode 100644 index 00000000000..aa6a485b054 --- /dev/null +++ b/include/net/caif/caif_spi.h @@ -0,0 +1,155 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_SPI_H_ +#define CAIF_SPI_H_ + +#include <net/caif/caif_device.h> + +#define SPI_CMD_WR 0x00 +#define SPI_CMD_RD 0x01 +#define SPI_CMD_EOT 0x02 +#define SPI_CMD_IND 0x04 + +#define SPI_DMA_BUF_LEN 8192 + +#define WL_SZ 2 /* 16 bits. */ +#define SPI_CMD_SZ 4 /* 32 bits. */ +#define SPI_IND_SZ 4 /* 32 bits. */ + +#define SPI_XFER 0 +#define SPI_SS_ON 1 +#define SPI_SS_OFF 2 +#define SPI_TERMINATE 3 + +/* Minimum time between different levels is 50 microseconds. */ +#define MIN_TRANSITION_TIME_USEC 50 + +/* Defines for calculating duration of SPI transfers for a particular + * number of bytes. + */ +#define SPI_MASTER_CLK_MHZ 13 +#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk) + +/* Normally this should be aligned on the modem in order to benefit from full + * duplex transfers. However a size of 8188 provokes errors when running with + * the modem. These errors occur when packet sizes approaches 4 kB of data. + */ +#define CAIF_MAX_SPI_FRAME 4092 + +/* Maximum number of uplink CAIF frames that can reside in the same SPI frame. + * This number should correspond with the modem setting. The application side + * CAIF accepts any number of embedded downlink CAIF frames. + */ +#define CAIF_MAX_SPI_PKTS 9 + +/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier + * debugging. Both TX and RX buffers will be filled before the transfer. + */ +#define CFSPI_DBG_PREFILL 0 + +/* Structure describing a SPI transfer. */ +struct cfspi_xfer { + u16 tx_dma_len; + u16 rx_dma_len; + void *va_tx[2]; + dma_addr_t pa_tx[2]; + void *va_rx; + dma_addr_t pa_rx; +}; + +/* Structure implemented by the SPI interface. */ +struct cfspi_ifc { + void (*ss_cb) (bool assert, struct cfspi_ifc *ifc); + void (*xfer_done_cb) (struct cfspi_ifc *ifc); + void *priv; +}; + +/* Structure implemented by SPI clients. 
*/ +struct cfspi_dev { + int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev); + void (*sig_xfer) (bool xfer, struct cfspi_dev *dev); + struct cfspi_ifc *ifc; + char *name; + u32 clk_mhz; + void *priv; +}; + +/* Enumeration describing the CAIF SPI state. */ +enum cfspi_state { + CFSPI_STATE_WAITING = 0, + CFSPI_STATE_AWAKE, + CFSPI_STATE_FETCH_PKT, + CFSPI_STATE_GET_NEXT, + CFSPI_STATE_INIT_XFER, + CFSPI_STATE_WAIT_ACTIVE, + CFSPI_STATE_SIG_ACTIVE, + CFSPI_STATE_WAIT_XFER_DONE, + CFSPI_STATE_XFER_DONE, + CFSPI_STATE_WAIT_INACTIVE, + CFSPI_STATE_SIG_INACTIVE, + CFSPI_STATE_DELIVER_PKT, + CFSPI_STATE_MAX, +}; + +/* Structure implemented by SPI physical interfaces. */ +struct cfspi { + struct caif_dev_common cfdev; + struct net_device *ndev; + struct platform_device *pdev; + struct sk_buff_head qhead; + struct sk_buff_head chead; + u16 cmd; + u16 tx_cpck_len; + u16 tx_npck_len; + u16 rx_cpck_len; + u16 rx_npck_len; + struct cfspi_ifc ifc; + struct cfspi_xfer xfer; + struct cfspi_dev *dev; + unsigned long state; + struct work_struct work; + struct workqueue_struct *wq; + struct list_head list; + int flow_off_sent; + u32 qd_low_mark; + u32 qd_high_mark; + struct completion comp; + wait_queue_head_t wait; + spinlock_t lock; + bool flow_stop; + bool slave; + bool slave_talked; +#ifdef CONFIG_DEBUG_FS + enum cfspi_state dbg_state; + u16 pcmd; + u16 tx_ppck_len; + u16 rx_ppck_len; + struct dentry *dbgfs_dir; + struct dentry *dbgfs_state; + struct dentry *dbgfs_frame; +#endif /* CONFIG_DEBUG_FS */ +}; + +extern int spi_frm_align; +extern int spi_up_head_align; +extern int spi_up_tail_align; +extern int spi_down_head_align; +extern int spi_down_tail_align; +extern struct platform_driver cfspi_spi_driver; + +void cfspi_dbg_state(struct cfspi *cfspi, int state); +int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len); +int cfspi_xmitlen(struct cfspi *cfspi); +int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len); +int cfspi_spi_remove(struct platform_device *pdev); +int cfspi_spi_probe(struct platform_device *pdev); +int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len); +int cfspi_xmitlen(struct cfspi *cfspi); +int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len); +void cfspi_xfer(struct work_struct *work); + +#endif /* CAIF_SPI_H_ */ diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h new file mode 100644 index 00000000000..70bfd017581 --- /dev/null +++ b/include/net/caif/cfcnfg.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFCNFG_H_ +#define CFCNFG_H_ +#include <linux/spinlock.h> +#include <linux/netdevice.h> +#include <net/caif/caif_layer.h> +#include <net/caif/cfctrl.h> + +struct cfcnfg; + +/** + * enum cfcnfg_phy_preference - Physical preference HW Abstraction + * + * @CFPHYPREF_UNSPECIFIED: Default physical interface + * + * @CFPHYPREF_LOW_LAT: Default physical interface for low-latency + * traffic + * @CFPHYPREF_HIGH_BW: Default physical interface for high-bandwidth + * traffic + * @CFPHYPREF_LOOP: TEST only Loopback interface simulating modem + * responses. + * + */ +enum cfcnfg_phy_preference { + CFPHYPREF_UNSPECIFIED, + CFPHYPREF_LOW_LAT, + CFPHYPREF_HIGH_BW, + CFPHYPREF_LOOP +}; + +/** + * cfcnfg_create() - Get the CAIF configuration object given network. + * @net: Network for the CAIF configuration object. + */ +struct cfcnfg *get_cfcnfg(struct net *net); + +/** + * cfcnfg_create() - Create the CAIF configuration object. 
+ */ +struct cfcnfg *cfcnfg_create(void); + +/** + * cfcnfg_remove() - Remove the CFCNFG object + * @cfg: config object + */ +void cfcnfg_remove(struct cfcnfg *cfg); + +/** + * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack. + * @cnfg: Pointer to a CAIF configuration object, created by + * cfcnfg_create(). + * @dev: Pointer to link layer device + * @phy_layer: Specify the physical layer. The transmit function + * MUST be set in the structure. + * @pref: The phy (link layer) preference. + * @link_support: Protocol implementation for link layer specific protocol. + * @fcs: Specify if checksum is used in CAIF Framing Layer. + * @head_room: Head space needed by link specific protocol. + */ +void +cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + struct net_device *dev, struct cflayer *phy_layer, + enum cfcnfg_phy_preference pref, + struct cflayer *link_support, + bool fcs, int head_room); + +/** + * cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack. + * + * @cnfg: Pointer to a CAIF configuration object, created by + * cfcnfg_create(). + * @phy_layer: Adaptation layer to be removed. + */ +int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer); + +/** + * cfcnfg_set_phy_state() - Set the state of the physical interface device. + * @cnfg: Configuration object + * @phy_layer: Physical Layer representation + * @up: State of device + */ +int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer, + bool up); + +#endif /* CFCNFG_H_ */ diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h new file mode 100644 index 00000000000..f2ae33d23ba --- /dev/null +++ b/include/net/caif/cfctrl.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFCTRL_H_ +#define CFCTRL_H_ +#include <net/caif/caif_layer.h> +#include <net/caif/cfsrvl.h> + +/* CAIF Control packet commands */ +enum cfctrl_cmd { + CFCTRL_CMD_LINK_SETUP = 0, + CFCTRL_CMD_LINK_DESTROY = 1, + CFCTRL_CMD_LINK_ERR = 2, + CFCTRL_CMD_ENUM = 3, + CFCTRL_CMD_SLEEP = 4, + CFCTRL_CMD_WAKE = 5, + CFCTRL_CMD_LINK_RECONF = 6, + CFCTRL_CMD_START_REASON = 7, + CFCTRL_CMD_RADIO_SET = 8, + CFCTRL_CMD_MODEM_SET = 9, + CFCTRL_CMD_MASK = 0xf +}; + +/* Channel types */ +enum cfctrl_srv { + CFCTRL_SRV_DECM = 0, + CFCTRL_SRV_VEI = 1, + CFCTRL_SRV_VIDEO = 2, + CFCTRL_SRV_DBG = 3, + CFCTRL_SRV_DATAGRAM = 4, + CFCTRL_SRV_RFM = 5, + CFCTRL_SRV_UTIL = 6, + CFCTRL_SRV_MASK = 0xf +}; + +#define CFCTRL_RSP_BIT 0x20 +#define CFCTRL_ERR_BIT 0x10 + +struct cfctrl_rsp { + void (*linksetup_rsp)(struct cflayer *layer, u8 linkid, + enum cfctrl_srv serv, u8 phyid, + struct cflayer *adapt_layer); + void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid); + void (*linkerror_ind)(void); + void (*enum_rsp)(void); + void (*sleep_rsp)(void); + void (*wake_rsp)(void); + void (*restart_rsp)(void); + void (*radioset_rsp)(void); + void (*reject_rsp)(struct cflayer *layer, u8 linkid, + struct cflayer *client_layer); +}; + +/* Link Setup Parameters for CAIF-Links. 
*/ +struct cfctrl_link_param { + enum cfctrl_srv linktype;/* (T3,T0) Type of Channel */ + u8 priority; /* (P4,P0) Priority of the channel */ + u8 phyid; /* (U2-U0) Physical interface to connect */ + u8 endpoint; /* (E1,E0) Endpoint for data channels */ + u8 chtype; /* (H1,H0) Channel-Type, applies to + * VEI, DEBUG */ + union { + struct { + u8 connid; /* (D7,D0) Video LinkId */ + } video; + + struct { + u32 connid; /* (N31,Ngit0) Connection ID used + * for Datagram */ + } datagram; + + struct { + u32 connid; /* Connection ID used for RFM */ + char volume[20]; /* Volume to mount for RFM */ + } rfm; /* Configuration for RFM */ + + struct { + u16 fifosize_kb; /* Psock FIFO size in KB */ + u16 fifosize_bufs; /* Psock # signal buffers */ + char name[16]; /* Name of the PSOCK service */ + u8 params[255]; /* Link setup Parameters> */ + u16 paramlen; /* Length of Link Setup + * Parameters */ + } utility; /* Configuration for Utility Links (Psock) */ + } u; +}; + +/* This structure is used internally in CFCTRL */ +struct cfctrl_request_info { + int sequence_no; + enum cfctrl_cmd cmd; + u8 channel_id; + struct cfctrl_link_param param; + struct cflayer *client_layer; + struct list_head list; +}; + +struct cfctrl { + struct cfsrvl serv; + struct cfctrl_rsp res; + atomic_t req_seq_no; + atomic_t rsp_seq_no; + struct list_head list; + /* Protects from simultaneous access to first_req list */ + spinlock_t info_list_lock; +#ifndef CAIF_NO_LOOP + u8 loop_linkid; + int loop_linkused[256]; + /* Protects simultaneous access to loop_linkid and loop_linkused */ + spinlock_t loop_linkid_lock; +#endif + +}; + +void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid); +int cfctrl_linkup_request(struct cflayer *cfctrl, + struct cfctrl_link_param *param, + struct cflayer *user_layer); +int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid, + struct cflayer *client); + +struct cflayer *cfctrl_create(void); +struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer); +int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer); +void cfctrl_remove(struct cflayer *layr); + +#endif /* CFCTRL_H_ */ diff --git a/include/net/caif/cffrml.h b/include/net/caif/cffrml.h new file mode 100644 index 00000000000..a06e33fbaa8 --- /dev/null +++ b/include/net/caif/cffrml.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFFRML_H_ +#define CFFRML_H_ +#include <net/caif/caif_layer.h> +#include <linux/netdevice.h> + +struct cffrml; +struct cflayer *cffrml_create(u16 phyid, bool use_fcs); +void cffrml_free(struct cflayer *layr); +void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up); +void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn); +void cffrml_put(struct cflayer *layr); +void cffrml_hold(struct cflayer *layr); +int cffrml_refcnt_read(struct cflayer *layr); + +#endif /* CFFRML_H_ */ diff --git a/include/net/caif/cfmuxl.h b/include/net/caif/cfmuxl.h new file mode 100644 index 00000000000..752999572f2 --- /dev/null +++ b/include/net/caif/cfmuxl.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFMUXL_H_ +#define CFMUXL_H_ +#include <net/caif/caif_layer.h> + +struct cfsrvl; +struct cffrml; + +struct cflayer *cfmuxl_create(void); +int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid); +struct cflayer *cfmuxl_remove_dnlayer(struct cflayer 
*layr, u8 phyid); +int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *up, u8 phyid); +struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 linkid); + +#endif /* CFMUXL_H_ */ diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h new file mode 100644 index 00000000000..1c1ad46250d --- /dev/null +++ b/include/net/caif/cfpkt.h @@ -0,0 +1,205 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFPKT_H_ +#define CFPKT_H_ +#include <net/caif/caif_layer.h> +#include <linux/types.h> +struct cfpkt; + +/* Create a CAIF packet. + * len: Length of packet to be created + * @return New packet. + */ +struct cfpkt *cfpkt_create(u16 len); + +/* + * Destroy a CAIF Packet. + * pkt Packet to be destoyed. + */ +void cfpkt_destroy(struct cfpkt *pkt); + +/* + * Extract header from packet. + * + * pkt Packet to extract header data from. + * data Pointer to copy the header data into. + * len Length of head data to copy. + * @return zero on success and error code upon failure + */ +int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len); + +/* + * Peek header from packet. + * Reads data from packet without changing packet. + * + * pkt Packet to extract header data from. + * data Pointer to copy the header data into. + * len Length of head data to copy. + * @return zero on success and error code upon failure + */ +int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len); + +/* + * Extract header from trailer (end of packet). + * + * pkt Packet to extract header data from. + * data Pointer to copy the trailer data into. + * len Length of header data to copy. + * @return zero on success and error code upon failure + */ +int cfpkt_extr_trail(struct cfpkt *pkt, void *data, u16 len); + +/* + * Add header to packet. + * + * + * pkt Packet to add header data to. + * data Pointer to data to copy into the header. + * len Length of header data to copy. + * @return zero on success and error code upon failure + */ +int cfpkt_add_head(struct cfpkt *pkt, const void *data, u16 len); + +/* + * Add trailer to packet. + * + * + * pkt Packet to add trailer data to. + * data Pointer to data to copy into the trailer. + * len Length of trailer data to copy. + * @return zero on success and error code upon failure + */ +int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len); + +/* + * Pad trailer on packet. + * Moves data pointer in packet, no content copied. + * + * pkt Packet in which to pad trailer. + * len Length of padding to add. + * @return zero on success and error code upon failure + */ +int cfpkt_pad_trail(struct cfpkt *pkt, u16 len); + +/* + * Add a single byte to packet body (tail). + * + * pkt Packet in which to add byte. + * data Byte to add. + * @return zero on success and error code upon failure + */ +int cfpkt_addbdy(struct cfpkt *pkt, const u8 data); + +/* + * Add a data to packet body (tail). + * + * pkt Packet in which to add data. + * data Pointer to data to copy into the packet body. + * len Length of data to add. + * @return zero on success and error code upon failure + */ +int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len); + +/* + * Checks whether there are more data to process in packet. + * pkt Packet to check. + * @return true if more data are available in packet false otherwise + */ +bool cfpkt_more(struct cfpkt *pkt); + +/* + * Checks whether the packet is erroneous, + * i.e. 
if it has been attempted to extract more data than available in packet + * or writing more data than has been allocated in cfpkt_create(). + * pkt Packet to check. + * @return true on error false otherwise + */ +bool cfpkt_erroneous(struct cfpkt *pkt); + +/* + * Get the packet length. + * pkt Packet to get length from. + * @return Number of bytes in packet. + */ +u16 cfpkt_getlen(struct cfpkt *pkt); + +/* + * Set the packet length, by adjusting the trailer pointer according to length. + * pkt Packet to set length. + * len Packet length. + * @return Number of bytes in packet. + */ +int cfpkt_setlen(struct cfpkt *pkt, u16 len); + +/* + * cfpkt_append - Appends a packet's data to another packet. + * dstpkt: Packet to append data into, WILL BE FREED BY THIS FUNCTION + * addpkt: Packet to be appended and automatically released, + * WILL BE FREED BY THIS FUNCTION. + * expectlen: Packet's expected total length. This should be considered + * as a hint. + * NB: Input packets will be destroyed after appending and cannot be used + * after calling this function. + * @return The new appended packet. + */ +struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt, + u16 expectlen); + +/* + * cfpkt_split - Split a packet into two packets at the specified split point. + * pkt: Packet to be split (will contain the first part of the data on exit) + * pos: Position to split packet in two parts. + * @return The new packet, containing the second part of the data. + */ +struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos); + +/* + * Iteration function, iterates the packet buffers from start to end. + * + * Checksum iteration function used to iterate buffers + * (we may have packets consisting of a chain of buffers) + * pkt: Packet to calculate checksum for + * iter_func: Function pointer to iteration function + * chks: Checksum calculated so far. + * buf: Pointer to the buffer to checksum + * len: Length of buf. + * data: Initial checksum value. + * @return Checksum of buffer. + */ + +u16 cfpkt_iterate(struct cfpkt *pkt, + u16 (*iter_func)(u16 chks, void *buf, u16 len), + u16 data); + +/* Map from a "native" packet (e.g. Linux Socket Buffer) to a CAIF packet. + * dir - Direction indicating whether this packet is to be sent or received. + * nativepkt - The native packet to be transformed to a CAIF packet + * @return The mapped CAIF Packet CFPKT. + */ +struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt); + +/* Map from a CAIF packet to a "native" packet (e.g. Linux Socket Buffer). + * pkt - The CAIF packet to be transformed into a "native" packet. + * @return The native packet transformed from a CAIF packet. + */ +void *cfpkt_tonative(struct cfpkt *pkt); + +/* + * Returns packet information for a packet. + * pkt Packet to get info from; + * @return Packet information + */ +struct caif_payload_info *cfpkt_info(struct cfpkt *pkt); + +/** cfpkt_set_prio - set priority for a CAIF packet. + * + * @pkt: The CAIF packet to be adjusted. + * @prio: one of TC_PRIO_ constants. 
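+ *
+ * Illustrative sketch only (not taken from the CAIF implementation): a
+ * sender could create a small packet, append a payload byte and mark it
+ * as control traffic before handing it down the stack. TC_PRIO_CONTROL
+ * comes from <linux/pkt_sched.h>; the length and payload value here are
+ * made up for the example.
+ *
+ *	struct cfpkt *pkt = cfpkt_create(20);
+ *
+ *	if (pkt != NULL) {
+ *		cfpkt_addbdy(pkt, 0x7e);
+ *		cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
+ *	}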
+ */ +void cfpkt_set_prio(struct cfpkt *pkt, int prio); + +#endif /* CFPKT_H_ */ diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h new file mode 100644 index 00000000000..b5b020f3c72 --- /dev/null +++ b/include/net/caif/cfserl.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFSERL_H_ +#define CFSERL_H_ +#include <net/caif/caif_layer.h> + +struct cflayer *cfserl_create(int instance, bool use_stx); +#endif diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h new file mode 100644 index 00000000000..cd47705c2cc --- /dev/null +++ b/include/net/caif/cfsrvl.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CFSRVL_H_ +#define CFSRVL_H_ +#include <linux/list.h> +#include <linux/stddef.h> +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/rculist.h> + +struct cfsrvl { + struct cflayer layer; + bool open; + bool phy_flow_on; + bool modem_flow_on; + bool supports_flowctrl; + void (*release)(struct cflayer *layer); + struct dev_info dev_info; + void (*hold)(struct cflayer *lyr); + void (*put)(struct cflayer *lyr); + struct rcu_head rcu; +}; + +struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info); +struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info); +struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info); +struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info); +struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info, + int mtu_size); +struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info); + +void cfsrvl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, + int phyid); + +bool cfsrvl_phyid_match(struct cflayer *layer, int phyid); + +void cfsrvl_init(struct cfsrvl *service, + u8 channel_id, + struct dev_info *dev_info, + bool supports_flowctrl); +bool cfsrvl_ready(struct cfsrvl *service, int *err); +u8 cfsrvl_getphyid(struct cflayer *layer); + +static inline void cfsrvl_get(struct cflayer *layr) +{ + struct cfsrvl *s = container_of(layr, struct cfsrvl, layer); + if (layr == NULL || layr->up == NULL || s->hold == NULL) + return; + + s->hold(layr->up); +} + +static inline void cfsrvl_put(struct cflayer *layr) +{ + struct cfsrvl *s = container_of(layr, struct cfsrvl, layer); + if (layr == NULL || layr->up == NULL || s->hold == NULL) + return; + + s->put(layr->up); +} +#endif /* CFSRVL_H_ */ diff --git a/include/net/cfg80211-wext.h b/include/net/cfg80211-wext.h new file mode 100644 index 00000000000..25baddc4fbe --- /dev/null +++ b/include/net/cfg80211-wext.h @@ -0,0 +1,55 @@ +#ifndef __NET_CFG80211_WEXT_H +#define __NET_CFG80211_WEXT_H +/* + * 802.11 device and configuration interface -- wext handlers + * + * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/iw_handler.h> + +/* + * Temporary wext handlers & helper functions + * + * These are used only by drivers that aren't yet fully + * converted to cfg80211. 
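+ *
+ * A minimal sketch of how a not-yet-converted driver might wire these up;
+ * the table and handler-def names are hypothetical, and the casts follow
+ * the usual wext convention because the per-ioctl prototypes differ from
+ * the generic iw_handler type:
+ *
+ *	static const iw_handler my_wext_handlers[] = {
+ *		[IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
+ *		[IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode,
+ *	};
+ *
+ *	static const struct iw_handler_def my_wext_handler_def = {
+ *		.standard	= my_wext_handlers,
+ *		.num_standard	= ARRAY_SIZE(my_wext_handlers),
+ *	};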
+ */ +int cfg80211_wext_giwname(struct net_device *dev, + struct iw_request_info *info, + char *name, char *extra); +int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, + u32 *mode, char *extra); +int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, + u32 *mode, char *extra); +int cfg80211_wext_siwscan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); +int cfg80211_wext_giwscan(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); +int cfg80211_wext_giwrange(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); +int cfg80211_wext_siwrts(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rts, char *extra); +int cfg80211_wext_giwrts(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rts, char *extra); +int cfg80211_wext_siwfrag(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *frag, char *extra); +int cfg80211_wext_giwfrag(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *frag, char *extra); +int cfg80211_wext_giwretry(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *retry, char *extra); + +#endif /* __NET_CFG80211_WEXT_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bcc480b8892..e46c437944f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1,55 +1,333 @@ #ifndef __NET_CFG80211_H #define __NET_CFG80211_H +/* + * 802.11 device and configuration interface + * + * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/netdevice.h> +#include <linux/debugfs.h> +#include <linux/list.h> +#include <linux/bug.h> #include <linux/netlink.h> #include <linux/skbuff.h> #include <linux/nl80211.h> -#include <net/genetlink.h> +#include <linux/if_ether.h> +#include <linux/ieee80211.h> +#include <linux/net.h> +#include <net/regulatory.h> + +/** + * DOC: Introduction + * + * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges + * userspace and drivers, and offers some utility functionality associated + * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used + * by all modern wireless drivers in Linux, so that they offer a consistent + * API through nl80211. For backward compatibility, cfg80211 also offers + * wireless extensions to userspace, but hides them from drivers completely. + * + * Additionally, cfg80211 contains code to help enforce regulatory spectrum + * use restrictions. + */ + + +/** + * DOC: Device registration + * + * In order for a driver to use cfg80211, it must register the hardware device + * with cfg80211. This happens through a number of hardware capability structs + * described below. + * + * The fundamental structure for each device is the 'wiphy', of which each + * instance describes a physical wireless device connected to the system. 
Each + * such wiphy can have zero, one, or many virtual interfaces associated with + * it, which need to be identified as such by pointing the network interface's + * @ieee80211_ptr pointer to a &struct wireless_dev which further describes + * the wireless part of the interface, normally this struct is embedded in the + * network interface's private data area. Drivers can optionally allow creating + * or destroying virtual interfaces on the fly, but without at least one or the + * ability to create some the wireless device isn't useful. + * + * Each wiphy structure contains device capability information, and also has + * a pointer to the various operations the driver offers. The definitions and + * structures here describe these capabilities in detail. + */ + +struct wiphy; /* - * 802.11 configuration in-kernel interface + * wireless hardware capability structures + */ + +/** + * enum ieee80211_band - supported frequency bands * - * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> + * The bands are assigned this way because the supported + * bitrates differ in these bands. + * + * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band + * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7) + * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) + * @IEEE80211_NUM_BANDS: number of defined bands */ +enum ieee80211_band { + IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ, + IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ, + IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ, -/* Radiotap header iteration - * implemented in net/wireless/radiotap.c - * docs in Documentation/networking/radiotap-headers.txt + /* keep last */ + IEEE80211_NUM_BANDS +}; + +/** + * enum ieee80211_channel_flags - channel flags + * + * Channel flags set by the regulatory control code. + * + * @IEEE80211_CHAN_DISABLED: This channel is disabled. + * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes + * sending probe requests or beaconing. + * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. + * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel + * is not permitted. + * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel + * is not permitted. + * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel. + * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band, + * this flag indicates that an 80 MHz channel cannot use this + * channel as the control or any of the secondary channels. + * This may be due to the driver or due to regulatory bandwidth + * restrictions. + * @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band, + * this flag indicates that an 160 MHz channel cannot use this + * channel as the control or any of the secondary channels. + * This may be due to the driver or due to regulatory bandwidth + * restrictions. + * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY + * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT + * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted + * on this channel. + * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted + * on this channel. 
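+ *
+ * For illustration only: a driver or regulatory check that must not
+ * initiate radiation on a channel would typically test these bits, e.g.
+ *
+ *	if (chan->flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_NO_IR))
+ *		return false;
+ *
+ * where "chan" is assumed to point at a &struct ieee80211_channel taken
+ * from the wiphy's band definitions.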
+ * */ +enum ieee80211_channel_flags { + IEEE80211_CHAN_DISABLED = 1<<0, + IEEE80211_CHAN_NO_IR = 1<<1, + /* hole at 1<<2 */ + IEEE80211_CHAN_RADAR = 1<<3, + IEEE80211_CHAN_NO_HT40PLUS = 1<<4, + IEEE80211_CHAN_NO_HT40MINUS = 1<<5, + IEEE80211_CHAN_NO_OFDM = 1<<6, + IEEE80211_CHAN_NO_80MHZ = 1<<7, + IEEE80211_CHAN_NO_160MHZ = 1<<8, + IEEE80211_CHAN_INDOOR_ONLY = 1<<9, + IEEE80211_CHAN_GO_CONCURRENT = 1<<10, + IEEE80211_CHAN_NO_20MHZ = 1<<11, + IEEE80211_CHAN_NO_10MHZ = 1<<12, +}; + +#define IEEE80211_CHAN_NO_HT40 \ + (IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + +#define IEEE80211_DFS_MIN_CAC_TIME_MS 60000 +#define IEEE80211_DFS_MIN_NOP_TIME_MS (30 * 60 * 1000) + /** - * struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args - * @rtheader: pointer to the radiotap header we are walking through - * @max_length: length of radiotap header in cpu byte ordering - * @this_arg_index: IEEE80211_RADIOTAP_... index of current arg - * @this_arg: pointer to current radiotap arg - * @arg_index: internal next argument index - * @arg: internal next argument pointer - * @next_bitmap: internal pointer to next present u32 - * @bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present + * struct ieee80211_channel - channel definition + * + * This structure describes a single channel for use + * with cfg80211. + * + * @center_freq: center frequency in MHz + * @hw_value: hardware-specific value for the channel + * @flags: channel flags from &enum ieee80211_channel_flags. + * @orig_flags: channel flags at registration time, used by regulatory + * code to support devices with additional restrictions + * @band: band this channel belongs to. + * @max_antenna_gain: maximum antenna gain in dBi + * @max_power: maximum transmission power (in dBm) + * @max_reg_power: maximum regulatory transmission power (in dBm) + * @beacon_found: helper to regulatory code to indicate when a beacon + * has been found on this channel. Use regulatory_hint_found_beacon() + * to enable this, this is useful only on 5 GHz band. + * @orig_mag: internal use + * @orig_mpwr: internal use + * @dfs_state: current state of this channel. Only relevant if radar is required + * on this channel. + * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered. + * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels. */ +struct ieee80211_channel { + enum ieee80211_band band; + u16 center_freq; + u16 hw_value; + u32 flags; + int max_antenna_gain; + int max_power; + int max_reg_power; + bool beacon_found; + u32 orig_flags; + int orig_mag, orig_mpwr; + enum nl80211_dfs_state dfs_state; + unsigned long dfs_state_entered; + unsigned int dfs_cac_ms; +}; -struct ieee80211_radiotap_iterator { - struct ieee80211_radiotap_header *rtheader; - int max_length; - int this_arg_index; - u8 *this_arg; +/** + * enum ieee80211_rate_flags - rate flags + * + * Hardware/specification flags for rates. These are structured + * in a way that allows using the same bitrate structure for + * different bands/PHY modes. + * + * @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short + * preamble on this bitrate; only relevant in 2.4GHz band and + * with CCK rates. + * @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate + * when used with 802.11a (on the 5 GHz band); filled by the + * core code when registering the wiphy. 
+ * @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate + * when used with 802.11b (on the 2.4 GHz band); filled by the + * core code when registering the wiphy. + * @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate + * when used with 802.11g (on the 2.4 GHz band); filled by the + * core code when registering the wiphy. + * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode. + * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode + * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode + */ +enum ieee80211_rate_flags { + IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, + IEEE80211_RATE_MANDATORY_A = 1<<1, + IEEE80211_RATE_MANDATORY_B = 1<<2, + IEEE80211_RATE_MANDATORY_G = 1<<3, + IEEE80211_RATE_ERP_G = 1<<4, + IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5, + IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6, +}; - int arg_index; - u8 *arg; - __le32 *next_bitmap; - u32 bitmap_shifter; +/** + * struct ieee80211_rate - bitrate definition + * + * This structure describes a bitrate that an 802.11 PHY can + * operate with. The two values @hw_value and @hw_value_short + * are only for driver use when pointers to this structure are + * passed around. + * + * @flags: rate-specific flags + * @bitrate: bitrate in units of 100 Kbps + * @hw_value: driver/hardware value for this rate + * @hw_value_short: driver/hardware value for this rate when + * short preamble is used + */ +struct ieee80211_rate { + u32 flags; + u16 bitrate; + u16 hw_value, hw_value_short; }; -extern int ieee80211_radiotap_iterator_init( - struct ieee80211_radiotap_iterator *iterator, - struct ieee80211_radiotap_header *radiotap_header, - int max_length); +/** + * struct ieee80211_sta_ht_cap - STA's HT capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11n HT capabilities for an STA. + * + * @ht_supported: is HT supported by the STA + * @cap: HT capabilities map as described in 802.11n spec + * @ampdu_factor: Maximum A-MPDU length factor + * @ampdu_density: Minimum A-MPDU spacing + * @mcs: Supported MCS rates + */ +struct ieee80211_sta_ht_cap { + u16 cap; /* use IEEE80211_HT_CAP_ */ + bool ht_supported; + u8 ampdu_factor; + u8 ampdu_density; + struct ieee80211_mcs_info mcs; +}; -extern int ieee80211_radiotap_iterator_next( - struct ieee80211_radiotap_iterator *iterator); +/** + * struct ieee80211_sta_vht_cap - STA's VHT capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11ac VHT capabilities for an STA. + * + * @vht_supported: is VHT supported by the STA + * @cap: VHT capabilities map as described in 802.11ac spec + * @vht_mcs: Supported VHT MCS rates + */ +struct ieee80211_sta_vht_cap { + bool vht_supported; + u32 cap; /* use IEEE80211_VHT_CAP_ */ + struct ieee80211_vht_mcs_info vht_mcs; +}; + +/** + * struct ieee80211_supported_band - frequency band definition + * + * This structure describes a frequency band a wiphy + * is able to operate in. + * + * @channels: Array of channels the hardware can operate in + * in this band. + * @band: the band this structure represents + * @n_channels: Number of channels in @channels + * @bitrates: Array of bitrates the hardware can operate with + * in this band. Must be sorted to give a valid "supported + * rates" IE, i.e. CCK rates first, then OFDM. 
+ * @n_bitrates: Number of bitrates in @bitrates + * @ht_cap: HT capabilities in this band + * @vht_cap: VHT capabilities in this band + */ +struct ieee80211_supported_band { + struct ieee80211_channel *channels; + struct ieee80211_rate *bitrates; + enum ieee80211_band band; + int n_channels; + int n_bitrates; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; +}; + +/* + * Wireless hardware/device configuration structures and methods + */ + +/** + * DOC: Actions and configuration + * + * Each wireless device and each virtual interface offer a set of configuration + * operations and other actions that are invoked by userspace. Each of these + * actions is described in the operations structure, and the parameters these + * operations use are described separately. + * + * Additionally, some operations are asynchronous and expect to get status + * information via some functions that drivers need to call. + * + * Scanning and BSS list handling with its associated functionality is described + * in a separate chapter. + */ +/** + * struct vif_params - describes virtual interface parameters + * @use_4addr: use 4-address frames + * @macaddr: address to use for this virtual interface. This will only + * be used for non-netdevice interfaces. If this parameter is set + * to zero address the driver may determine the address as needed. + */ +struct vif_params { + int use_4addr; + u8 macaddr[ETH_ALEN]; +}; - /** +/** * struct key_params - key information * * Information about a key @@ -60,52 +338,397 @@ extern int ieee80211_radiotap_iterator_next( * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used * with the get_key() callback, must be in little endian, * length given by @seq_len. + * @seq_len: length of @seq. */ struct key_params { - u8 *key; - u8 *seq; + const u8 *key; + const u8 *seq; int key_len; int seq_len; u32 cipher; }; /** - * struct beacon_parameters - beacon parameters + * struct cfg80211_chan_def - channel definition + * @chan: the (control) channel + * @width: channel width + * @center_freq1: center frequency of first segment + * @center_freq2: center frequency of second segment + * (only with 80+80 MHz) + */ +struct cfg80211_chan_def { + struct ieee80211_channel *chan; + enum nl80211_chan_width width; + u32 center_freq1; + u32 center_freq2; +}; + +/** + * cfg80211_get_chandef_type - return old channel type from chandef + * @chandef: the channel definition + * + * Return: The old channel type (NOHT, HT20, HT40+/-) from a given + * chandef, which must have a bandwidth allowing this conversion. + */ +static inline enum nl80211_channel_type +cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + return NL80211_CHAN_NO_HT; + case NL80211_CHAN_WIDTH_20: + return NL80211_CHAN_HT20; + case NL80211_CHAN_WIDTH_40: + if (chandef->center_freq1 > chandef->chan->center_freq) + return NL80211_CHAN_HT40PLUS; + return NL80211_CHAN_HT40MINUS; + default: + WARN_ON(1); + return NL80211_CHAN_NO_HT; + } +} + +/** + * cfg80211_chandef_create - create channel definition using channel type + * @chandef: the channel definition struct to fill + * @channel: the control channel + * @chantype: the channel type + * + * Given a channel type, create a channel definition. 
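+ *
+ * A minimal usage sketch; the "chan" pointer is assumed to come from the
+ * wiphy's channel list and is not defined in this header:
+ *
+ *	struct cfg80211_chan_def chandef;
+ *
+ *	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ *	if (!cfg80211_chandef_valid(&chandef))
+ *		return -EINVAL;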
+ */ +void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, + struct ieee80211_channel *channel, + enum nl80211_channel_type chantype); + +/** + * cfg80211_chandef_identical - check if two channel definitions are identical + * @chandef1: first channel definition + * @chandef2: second channel definition + * + * Return: %true if the channels defined by the channel definitions are + * identical, %false otherwise. + */ +static inline bool +cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, + const struct cfg80211_chan_def *chandef2) +{ + return (chandef1->chan == chandef2->chan && + chandef1->width == chandef2->width && + chandef1->center_freq1 == chandef2->center_freq1 && + chandef1->center_freq2 == chandef2->center_freq2); +} + +/** + * cfg80211_chandef_compatible - check if two channel definitions are compatible + * @chandef1: first channel definition + * @chandef2: second channel definition + * + * Return: %NULL if the given channel definitions are incompatible, + * chandef1 or chandef2 otherwise. + */ +const struct cfg80211_chan_def * +cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1, + const struct cfg80211_chan_def *chandef2); + +/** + * cfg80211_chandef_valid - check if a channel definition is valid + * @chandef: the channel definition to check + * Return: %true if the channel definition is valid. %false otherwise. + */ +bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef); + +/** + * cfg80211_chandef_usable - check if secondary channels can be used + * @wiphy: the wiphy to validate against + * @chandef: the channel definition to check + * @prohibited_flags: the regulatory channel flags that must not be set + * Return: %true if secondary channels are usable. %false otherwise. + */ +bool cfg80211_chandef_usable(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef, + u32 prohibited_flags); + +/** + * cfg80211_chandef_dfs_required - checks if radar detection is required + * @wiphy: the wiphy to validate against + * @chandef: the channel definition to check + * @iftype: the interface type as specified in &enum nl80211_iftype + * Returns: + * 1 if radar detection is required, 0 if it is not, < 0 on error + */ +int cfg80211_chandef_dfs_required(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype); + +/** + * ieee80211_chandef_rate_flags - returns rate flags for a channel * - * Used to configure the beacon for an interface. + * In some channel types, not all rates may be used - for example CCK + * rates may not be used in 5/10 MHz channels. * + * @chandef: channel definition for the channel + * + * Returns: rate flags which apply for this channel + */ +static inline enum ieee80211_rate_flags +ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return IEEE80211_RATE_SUPPORTS_5MHZ; + case NL80211_CHAN_WIDTH_10: + return IEEE80211_RATE_SUPPORTS_10MHZ; + default: + break; + } + return 0; +} + +/** + * ieee80211_chandef_max_power - maximum transmission power for the chandef + * + * In some regulations, the transmit power may depend on the configured channel + * bandwidth which may be defined as dBm/MHz. This function returns the actual + * max_power for non-standard (20 MHz) channels. 
+ * + * @chandef: channel definition for the channel + * + * Returns: maximum allowed transmission power in dBm for the chandef + */ +static inline int +ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return min(chandef->chan->max_reg_power - 6, + chandef->chan->max_power); + case NL80211_CHAN_WIDTH_10: + return min(chandef->chan->max_reg_power - 3, + chandef->chan->max_power); + default: + break; + } + return chandef->chan->max_power; +} + +/** + * enum survey_info_flags - survey information flags + * + * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in + * @SURVEY_INFO_IN_USE: channel is currently being used + * @SURVEY_INFO_CHANNEL_TIME: channel active time (in ms) was filled in + * @SURVEY_INFO_CHANNEL_TIME_BUSY: channel busy time was filled in + * @SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: extension channel busy time was filled in + * @SURVEY_INFO_CHANNEL_TIME_RX: channel receive time was filled in + * @SURVEY_INFO_CHANNEL_TIME_TX: channel transmit time was filled in + * + * Used by the driver to indicate which info in &struct survey_info + * it has filled in during the get_survey(). + */ +enum survey_info_flags { + SURVEY_INFO_NOISE_DBM = 1<<0, + SURVEY_INFO_IN_USE = 1<<1, + SURVEY_INFO_CHANNEL_TIME = 1<<2, + SURVEY_INFO_CHANNEL_TIME_BUSY = 1<<3, + SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 1<<4, + SURVEY_INFO_CHANNEL_TIME_RX = 1<<5, + SURVEY_INFO_CHANNEL_TIME_TX = 1<<6, +}; + +/** + * struct survey_info - channel survey response + * + * @channel: the channel this survey record reports, mandatory + * @filled: bitflag of flags from &enum survey_info_flags + * @noise: channel noise in dBm. This and all following fields are + * optional + * @channel_time: amount of time in ms the radio spent on the channel + * @channel_time_busy: amount of time the primary channel was sensed busy + * @channel_time_ext_busy: amount of time the extension channel was sensed busy + * @channel_time_rx: amount of time the radio spent receiving data + * @channel_time_tx: amount of time the radio spent transmitting data + * + * Used by dump_survey() to report back per-channel survey information. + * + * This structure can later be expanded with things like + * channel duty cycle etc. + */ +struct survey_info { + struct ieee80211_channel *channel; + u64 channel_time; + u64 channel_time_busy; + u64 channel_time_ext_busy; + u64 channel_time_rx; + u64 channel_time_tx; + u32 filled; + s8 noise; +}; + +/** + * struct cfg80211_crypto_settings - Crypto settings + * @wpa_versions: indicates which, if any, WPA versions are enabled + * (from enum nl80211_wpa_versions) + * @cipher_group: group key cipher suite (or 0 if unset) + * @n_ciphers_pairwise: number of AP supported unicast ciphers + * @ciphers_pairwise: unicast key cipher suites + * @n_akm_suites: number of AKM suites + * @akm_suites: AKM suites + * @control_port: Whether user space controls IEEE 802.1X port, i.e., + * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is + * required to assume that the port is unauthorized until authorized by + * user space. Otherwise, port is marked authorized by default. + * @control_port_ethertype: the control port protocol that should be + * allowed through even on unauthorized ports + * @control_port_no_encrypt: TRUE to prevent encryption of control port + * protocol frames. 
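+ *
+ * Purely illustrative example (the chosen suites are assumptions, not
+ * defaults): a WPA2/CCMP-PSK style configuration could be described as
+ *
+ *	struct cfg80211_crypto_settings crypto = {
+ *		.wpa_versions		= NL80211_WPA_VERSION_2,
+ *		.cipher_group		= WLAN_CIPHER_SUITE_CCMP,
+ *		.n_ciphers_pairwise	= 1,
+ *		.ciphers_pairwise	= { WLAN_CIPHER_SUITE_CCMP },
+ *		.n_akm_suites		= 1,
+ *		.akm_suites		= { WLAN_AKM_SUITE_PSK },
+ *	};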
+ */ +struct cfg80211_crypto_settings { + u32 wpa_versions; + u32 cipher_group; + int n_ciphers_pairwise; + u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES]; + int n_akm_suites; + u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; + bool control_port; + __be16 control_port_ethertype; + bool control_port_no_encrypt; +}; + +/** + * struct cfg80211_beacon_data - beacon data * @head: head portion of beacon (before TIM IE) - * or %NULL if not changed + * or %NULL if not changed * @tail: tail portion of beacon (after TIM IE) - * or %NULL if not changed - * @interval: beacon interval or zero if not changed - * @dtim_period: DTIM period or zero if not changed + * or %NULL if not changed * @head_len: length of @head * @tail_len: length of @tail + * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL + * @beacon_ies_len: length of beacon_ies in octets + * @proberesp_ies: extra information element(s) to add into Probe Response + * frames or %NULL + * @proberesp_ies_len: length of proberesp_ies in octets + * @assocresp_ies: extra information element(s) to add into (Re)Association + * Response frames or %NULL + * @assocresp_ies_len: length of assocresp_ies in octets + * @probe_resp_len: length of probe response template (@probe_resp) + * @probe_resp: probe response template (AP mode only) */ -struct beacon_parameters { - u8 *head, *tail; - int interval, dtim_period; - int head_len, tail_len; +struct cfg80211_beacon_data { + const u8 *head, *tail; + const u8 *beacon_ies; + const u8 *proberesp_ies; + const u8 *assocresp_ies; + const u8 *probe_resp; + + size_t head_len, tail_len; + size_t beacon_ies_len; + size_t proberesp_ies_len; + size_t assocresp_ies_len; + size_t probe_resp_len; +}; + +struct mac_address { + u8 addr[ETH_ALEN]; }; /** - * enum station_flags - station flags + * struct cfg80211_acl_data - Access control list data * - * Station capability flags. Note that these must be the bits - * according to the nl80211 flags. + * @acl_policy: ACL policy to be applied on the station's + * entry specified by mac_addr + * @n_acl_entries: Number of MAC address entries passed + * @mac_addrs: List of MAC addresses of stations to be used for ACL + */ +struct cfg80211_acl_data { + enum nl80211_acl_policy acl_policy; + int n_acl_entries; + + /* Keep it last */ + struct mac_address mac_addrs[]; +}; + +/** + * struct cfg80211_ap_settings - AP configuration * - * @STATION_FLAG_CHANGED: station flags were changed - * @STATION_FLAG_AUTHORIZED: station is authorized to send frames (802.1X) - * @STATION_FLAG_SHORT_PREAMBLE: station is capable of receiving frames - * with short preambles - * @STATION_FLAG_WME: station is WME/QoS capable + * Used to configure an AP interface. + * + * @chandef: defines the channel to use + * @beacon: beacon data + * @beacon_interval: beacon interval + * @dtim_period: DTIM period + * @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from + * user space) + * @ssid_len: length of @ssid + * @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames + * @crypto: crypto settings + * @privacy: the BSS uses privacy + * @auth_type: Authentication type (algorithm) + * @inactivity_timeout: time in seconds to determine station's inactivity. 
+ * @p2p_ctwindow: P2P CT Window + * @p2p_opp_ps: P2P opportunistic PS + * @acl: ACL configuration used by the drivers which has support for + * MAC address based access control + */ +struct cfg80211_ap_settings { + struct cfg80211_chan_def chandef; + + struct cfg80211_beacon_data beacon; + + int beacon_interval, dtim_period; + const u8 *ssid; + size_t ssid_len; + enum nl80211_hidden_ssid hidden_ssid; + struct cfg80211_crypto_settings crypto; + bool privacy; + enum nl80211_auth_type auth_type; + int inactivity_timeout; + u8 p2p_ctwindow; + bool p2p_opp_ps; + const struct cfg80211_acl_data *acl; +}; + +/** + * struct cfg80211_csa_settings - channel switch settings + * + * Used for channel switch + * + * @chandef: defines the channel to use after the switch + * @beacon_csa: beacon data while performing the switch + * @counter_offsets_beacon: offsets of the counters within the beacon (tail) + * @counter_offsets_presp: offsets of the counters within the probe response + * @n_counter_offsets_beacon: number of csa counters the beacon (tail) + * @n_counter_offsets_presp: number of csa counters in the probe response + * @beacon_after: beacon data to be used on the new channel + * @radar_required: whether radar detection is required on the new channel + * @block_tx: whether transmissions should be blocked while changing + * @count: number of beacons until switch */ -enum station_flags { - STATION_FLAG_CHANGED = 1<<0, - STATION_FLAG_AUTHORIZED = 1<<NL80211_STA_FLAG_AUTHORIZED, - STATION_FLAG_SHORT_PREAMBLE = 1<<NL80211_STA_FLAG_SHORT_PREAMBLE, - STATION_FLAG_WME = 1<<NL80211_STA_FLAG_WME, +struct cfg80211_csa_settings { + struct cfg80211_chan_def chandef; + struct cfg80211_beacon_data beacon_csa; + const u16 *counter_offsets_beacon; + const u16 *counter_offsets_presp; + unsigned int n_counter_offsets_beacon; + unsigned int n_counter_offsets_presp; + struct cfg80211_beacon_data beacon_after; + bool radar_required; + bool block_tx; + u8 count; +}; + +/** + * enum station_parameters_apply_mask - station parameter values to apply + * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) + * @STATION_PARAM_APPLY_CAPABILITY: apply new capability + * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state + * + * Not all station parameters have in-band "no change" signalling, + * for those that don't these flags will are used. + */ +enum station_parameters_apply_mask { + STATION_PARAM_APPLY_UAPSD = BIT(0), + STATION_PARAM_APPLY_CAPABILITY = BIT(1), + STATION_PARAM_APPLY_PLINK_STATE = BIT(2), }; /** @@ -117,54 +740,1334 @@ enum station_flags { * @supported_rates: supported rates in IEEE 802.11 format * (or NULL for no change) * @supported_rates_len: number of supported rates - * @station_flags: station flags (see &enum station_flags) + * @sta_flags_mask: station flags that changed + * (bitmask of BIT(NL80211_STA_FLAG_...)) + * @sta_flags_set: station flags values + * (bitmask of BIT(NL80211_STA_FLAG_...)) * @listen_interval: listen interval or -1 for no change * @aid: AID or zero for no change + * @plink_action: plink action to take + * @plink_state: set the peer link state for a station + * @ht_capa: HT capabilities of station + * @vht_capa: VHT capabilities of station + * @uapsd_queues: bitmap of queues configured for uapsd. same format + * as the AC bitmap in the QoS info field + * @max_sp: max Service Period. 
same format as the MAX_SP in the + * QoS info field (but already shifted down) + * @sta_modify_mask: bitmap indicating which parameters changed + * (for those that don't have a natural "no change" value), + * see &enum station_parameters_apply_mask + * @local_pm: local link-specific mesh power save mode (no change when set + * to unknown) + * @capability: station capability + * @ext_capab: extended capabilities of the station + * @ext_capab_len: number of extended capabilities + * @supported_channels: supported channels in IEEE 802.11 format + * @supported_channels_len: number of supported channels + * @supported_oper_classes: supported oper classes in IEEE 802.11 format + * @supported_oper_classes_len: number of supported operating classes + * @opmode_notif: operating mode field from Operating Mode Notification + * @opmode_notif_used: information if operating mode field is used */ struct station_parameters { - u8 *supported_rates; + const u8 *supported_rates; struct net_device *vlan; - u32 station_flags; + u32 sta_flags_mask, sta_flags_set; + u32 sta_modify_mask; int listen_interval; u16 aid; u8 supported_rates_len; + u8 plink_action; + u8 plink_state; + const struct ieee80211_ht_cap *ht_capa; + const struct ieee80211_vht_cap *vht_capa; + u8 uapsd_queues; + u8 max_sp; + enum nl80211_mesh_power_mode local_pm; + u16 capability; + const u8 *ext_capab; + u8 ext_capab_len; + const u8 *supported_channels; + u8 supported_channels_len; + const u8 *supported_oper_classes; + u8 supported_oper_classes_len; + u8 opmode_notif; + bool opmode_notif_used; +}; + +/** + * enum cfg80211_station_type - the type of station being modified + * @CFG80211_STA_AP_CLIENT: client of an AP interface + * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has + * the AP MLME in the device + * @CFG80211_STA_AP_STA: AP station on managed interface + * @CFG80211_STA_IBSS: IBSS station + * @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry + * while TDLS setup is in progress, it moves out of this state when + * being marked authorized; use this only if TDLS with external setup is + * supported/used) + * @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active + * entry that is operating, has been marked authorized by userspace) + * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed) + * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed) + */ +enum cfg80211_station_type { + CFG80211_STA_AP_CLIENT, + CFG80211_STA_AP_MLME_CLIENT, + CFG80211_STA_AP_STA, + CFG80211_STA_IBSS, + CFG80211_STA_TDLS_PEER_SETUP, + CFG80211_STA_TDLS_PEER_ACTIVE, + CFG80211_STA_MESH_PEER_KERNEL, + CFG80211_STA_MESH_PEER_USER, }; /** - * enum station_stats_flags - station statistics flags + * cfg80211_check_station_change - validate parameter changes + * @wiphy: the wiphy this operates on + * @params: the new parameters for a station + * @statype: the type of station being modified * - * Used by the driver to indicate which info in &struct station_stats - * it has filled in during get_station(). + * Utility function for the @change_station driver method. Call this function + * with the appropriate station type looking up the station (and checking that + * it exists). It will verify whether the station change is acceptable, and if + * not will return an error code. Note that it may modify the parameters for + * backward compatibility reasons, so don't use them before calling this. 
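+ *
+ * Sketch of the intended call site inside a driver's change_station()
+ * handler (the station type chosen here is just an example):
+ *
+ *	err = cfg80211_check_station_change(wiphy, params,
+ *					    CFG80211_STA_AP_CLIENT);
+ *	if (err)
+ *		return err;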
+ */ +int cfg80211_check_station_change(struct wiphy *wiphy, + struct station_parameters *params, + enum cfg80211_station_type statype); + +/** + * enum station_info_flags - station information flags + * + * Used by the driver to indicate which info in &struct station_info + * it has filled in during get_station() or dump_station(). + * + * @STATION_INFO_INACTIVE_TIME: @inactive_time filled + * @STATION_INFO_RX_BYTES: @rx_bytes filled + * @STATION_INFO_TX_BYTES: @tx_bytes filled + * @STATION_INFO_RX_BYTES64: @rx_bytes filled with 64-bit value + * @STATION_INFO_TX_BYTES64: @tx_bytes filled with 64-bit value + * @STATION_INFO_LLID: @llid filled + * @STATION_INFO_PLID: @plid filled + * @STATION_INFO_PLINK_STATE: @plink_state filled + * @STATION_INFO_SIGNAL: @signal filled + * @STATION_INFO_TX_BITRATE: @txrate fields are filled + * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) + * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value + * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value + * @STATION_INFO_TX_RETRIES: @tx_retries filled + * @STATION_INFO_TX_FAILED: @tx_failed filled + * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled + * @STATION_INFO_SIGNAL_AVG: @signal_avg filled + * @STATION_INFO_RX_BITRATE: @rxrate fields are filled + * @STATION_INFO_BSS_PARAM: @bss_param filled + * @STATION_INFO_CONNECTED_TIME: @connected_time filled + * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled + * @STATION_INFO_STA_FLAGS: @sta_flags filled + * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled + * @STATION_INFO_T_OFFSET: @t_offset filled + * @STATION_INFO_LOCAL_PM: @local_pm filled + * @STATION_INFO_PEER_PM: @peer_pm filled + * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled + * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled + * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled + * @STATION_INFO_EXPECTED_THROUGHPUT: @expected_throughput filled + */ +enum station_info_flags { + STATION_INFO_INACTIVE_TIME = BIT(0), + STATION_INFO_RX_BYTES = BIT(1), + STATION_INFO_TX_BYTES = BIT(2), + STATION_INFO_LLID = BIT(3), + STATION_INFO_PLID = BIT(4), + STATION_INFO_PLINK_STATE = BIT(5), + STATION_INFO_SIGNAL = BIT(6), + STATION_INFO_TX_BITRATE = BIT(7), + STATION_INFO_RX_PACKETS = BIT(8), + STATION_INFO_TX_PACKETS = BIT(9), + STATION_INFO_TX_RETRIES = BIT(10), + STATION_INFO_TX_FAILED = BIT(11), + STATION_INFO_RX_DROP_MISC = BIT(12), + STATION_INFO_SIGNAL_AVG = BIT(13), + STATION_INFO_RX_BITRATE = BIT(14), + STATION_INFO_BSS_PARAM = BIT(15), + STATION_INFO_CONNECTED_TIME = BIT(16), + STATION_INFO_ASSOC_REQ_IES = BIT(17), + STATION_INFO_STA_FLAGS = BIT(18), + STATION_INFO_BEACON_LOSS_COUNT = BIT(19), + STATION_INFO_T_OFFSET = BIT(20), + STATION_INFO_LOCAL_PM = BIT(21), + STATION_INFO_PEER_PM = BIT(22), + STATION_INFO_NONPEER_PM = BIT(23), + STATION_INFO_RX_BYTES64 = BIT(24), + STATION_INFO_TX_BYTES64 = BIT(25), + STATION_INFO_CHAIN_SIGNAL = BIT(26), + STATION_INFO_CHAIN_SIGNAL_AVG = BIT(27), + STATION_INFO_EXPECTED_THROUGHPUT = BIT(28), +}; + +/** + * enum station_info_rate_flags - bitrate info flags * - * @STATION_STAT_INACTIVE_TIME: @inactive_time filled - * @STATION_STAT_RX_BYTES: @rx_bytes filled - * @STATION_STAT_TX_BYTES: @tx_bytes filled + * Used by the driver to indicate the specific rate transmission + * type for 802.11n transmissions. 
+ * + * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS + * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS + * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 MHz width transmission + * @RATE_INFO_FLAGS_80_MHZ_WIDTH: 80 MHz width transmission + * @RATE_INFO_FLAGS_80P80_MHZ_WIDTH: 80+80 MHz width transmission + * @RATE_INFO_FLAGS_160_MHZ_WIDTH: 160 MHz width transmission + * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval + * @RATE_INFO_FLAGS_60G: 60GHz MCS */ -enum station_stats_flags { - STATION_STAT_INACTIVE_TIME = 1<<0, - STATION_STAT_RX_BYTES = 1<<1, - STATION_STAT_TX_BYTES = 1<<2, +enum rate_info_flags { + RATE_INFO_FLAGS_MCS = BIT(0), + RATE_INFO_FLAGS_VHT_MCS = BIT(1), + RATE_INFO_FLAGS_40_MHZ_WIDTH = BIT(2), + RATE_INFO_FLAGS_80_MHZ_WIDTH = BIT(3), + RATE_INFO_FLAGS_80P80_MHZ_WIDTH = BIT(4), + RATE_INFO_FLAGS_160_MHZ_WIDTH = BIT(5), + RATE_INFO_FLAGS_SHORT_GI = BIT(6), + RATE_INFO_FLAGS_60G = BIT(7), }; /** - * struct station_stats - station statistics + * struct rate_info - bitrate information * - * Station information filled by driver for get_station(). + * Information about a receiving or transmitting bitrate * - * @filled: bitflag of flags from &enum station_stats_flags + * @flags: bitflag of flags from &enum rate_info_flags + * @mcs: mcs index if struct describes a 802.11n bitrate + * @legacy: bitrate in 100kbit/s for 802.11abg + * @nss: number of streams (VHT only) + */ +struct rate_info { + u8 flags; + u8 mcs; + u16 legacy; + u8 nss; +}; + +/** + * enum station_info_rate_flags - bitrate info flags + * + * Used by the driver to indicate the specific rate transmission + * type for 802.11n transmissions. + * + * @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled + * @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled + * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled + */ +enum bss_param_flags { + BSS_PARAM_FLAGS_CTS_PROT = 1<<0, + BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1, + BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2, +}; + +/** + * struct sta_bss_parameters - BSS parameters for the attached station + * + * Information about the currently associated BSS + * + * @flags: bitflag of flags from &enum bss_param_flags + * @dtim_period: DTIM period for the BSS + * @beacon_interval: beacon interval + */ +struct sta_bss_parameters { + u8 flags; + u8 dtim_period; + u16 beacon_interval; +}; + +#define IEEE80211_MAX_CHAINS 4 + +/** + * struct station_info - station information + * + * Station information filled by driver for get_station() and dump_station. + * + * @filled: bitflag of flags from &enum station_info_flags + * @connected_time: time(in secs) since a station is last connected * @inactive_time: time since last station activity (tx/rx) in milliseconds * @rx_bytes: bytes received from this station * @tx_bytes: bytes transmitted to this station + * @llid: mesh local link id + * @plid: mesh peer link id + * @plink_state: mesh peer link state + * @signal: The signal strength, type depends on the wiphy's signal_type. + * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. + * @signal_avg: Average signal strength, type depends on the wiphy's signal_type. + * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. 
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg + * @chain_signal: per-chain signal strength of last received packet in dBm + * @chain_signal_avg: per-chain signal strength average in dBm + * @txrate: current unicast bitrate from this station + * @rxrate: current unicast bitrate to this station + * @rx_packets: packets received from this station + * @tx_packets: packets transmitted to this station + * @tx_retries: cumulative retry counts + * @tx_failed: number of failed transmissions (retries exceeded, no ACK) + * @rx_dropped_misc: Dropped for un-specified reason. + * @bss_param: current BSS parameters + * @generation: generation number for nl80211 dumps. + * This number should increase every time the list of stations + * changes, i.e. when a station is added or removed, so that + * userspace can tell whether it got a consistent snapshot. + * @assoc_req_ies: IEs from (Re)Association Request. + * This is used only when in AP mode with drivers that do not use + * user space MLME/SME implementation. The information is provided for + * the cfg80211_new_sta() calls to notify user space of the IEs. + * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. + * @sta_flags: station flags mask & values + * @beacon_loss_count: Number of times beacon loss event has triggered. + * @t_offset: Time offset of the station relative to this host. + * @local_pm: local mesh STA power save mode + * @peer_pm: peer mesh STA power save mode + * @nonpeer_pm: non-peer mesh STA power save mode + * @expected_throughput: expected throughput in kbps (including 802.11 headers) + * towards this station. */ -struct station_stats { +struct station_info { u32 filled; + u32 connected_time; u32 inactive_time; - u32 rx_bytes; - u32 tx_bytes; + u64 rx_bytes; + u64 tx_bytes; + u16 llid; + u16 plid; + u8 plink_state; + s8 signal; + s8 signal_avg; + + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 chain_signal_avg[IEEE80211_MAX_CHAINS]; + + struct rate_info txrate; + struct rate_info rxrate; + u32 rx_packets; + u32 tx_packets; + u32 tx_retries; + u32 tx_failed; + u32 rx_dropped_misc; + struct sta_bss_parameters bss_param; + struct nl80211_sta_flag_update sta_flags; + + int generation; + + const u8 *assoc_req_ies; + size_t assoc_req_ies_len; + + u32 beacon_loss_count; + s64 t_offset; + enum nl80211_mesh_power_mode local_pm; + enum nl80211_mesh_power_mode peer_pm; + enum nl80211_mesh_power_mode nonpeer_pm; + + u32 expected_throughput; + + /* + * Note: Add a new enum station_info_flags value for each new field and + * use it to check which fields are initialized. + */ }; -/* from net/wireless.h */ -struct wiphy; +/** + * cfg80211_get_station - retrieve information about a given station + * @dev: the device where the station is supposed to be connected to + * @mac_addr: the mac address of the station of interest + * @sinfo: pointer to the structure to fill with the information + * + * Returns 0 on success and sinfo is filled with the available information + * otherwise returns a negative error code and the content of sinfo has to be + * considered undefined. + */ +int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, + struct station_info *sinfo); + +/** + * enum monitor_flags - monitor flags + * + * Monitor interface configuration flags. Note that these must be the bits + * according to the nl80211 flags. 
+ * + * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS + * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP + * @MONITOR_FLAG_CONTROL: pass control frames + * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering + * @MONITOR_FLAG_COOK_FRAMES: report frames after processing + * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address + */ +enum monitor_flags { + MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL, + MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL, + MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL, + MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS, + MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES, + MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE, +}; + +/** + * enum mpath_info_flags - mesh path information flags + * + * Used by the driver to indicate which info in &struct mpath_info it has filled + * in during get_station() or dump_station(). + * + * @MPATH_INFO_FRAME_QLEN: @frame_qlen filled + * @MPATH_INFO_SN: @sn filled + * @MPATH_INFO_METRIC: @metric filled + * @MPATH_INFO_EXPTIME: @exptime filled + * @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled + * @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled + * @MPATH_INFO_FLAGS: @flags filled + */ +enum mpath_info_flags { + MPATH_INFO_FRAME_QLEN = BIT(0), + MPATH_INFO_SN = BIT(1), + MPATH_INFO_METRIC = BIT(2), + MPATH_INFO_EXPTIME = BIT(3), + MPATH_INFO_DISCOVERY_TIMEOUT = BIT(4), + MPATH_INFO_DISCOVERY_RETRIES = BIT(5), + MPATH_INFO_FLAGS = BIT(6), +}; + +/** + * struct mpath_info - mesh path information + * + * Mesh path information filled by driver for get_mpath() and dump_mpath(). + * + * @filled: bitfield of flags from &enum mpath_info_flags + * @frame_qlen: number of queued frames for this destination + * @sn: target sequence number + * @metric: metric (cost) of this mesh path + * @exptime: expiration time for the mesh path from now, in msecs + * @flags: mesh path flags + * @discovery_timeout: total mesh path discovery timeout, in msecs + * @discovery_retries: mesh path discovery retries + * @generation: generation number for nl80211 dumps. + * This number should increase every time the list of mesh paths + * changes, i.e. when a station is added or removed, so that + * userspace can tell whether it got a consistent snapshot. + */ +struct mpath_info { + u32 filled; + u32 frame_qlen; + u32 sn; + u32 metric; + u32 exptime; + u32 discovery_timeout; + u8 discovery_retries; + u8 flags; + + int generation; +}; + +/** + * struct bss_parameters - BSS parameters + * + * Used to change BSS parameters (mainly for AP mode). 
+ * + * @use_cts_prot: Whether to use CTS protection + * (0 = no, 1 = yes, -1 = do not change) + * @use_short_preamble: Whether the use of short preambles is allowed + * (0 = no, 1 = yes, -1 = do not change) + * @use_short_slot_time: Whether the use of short slot time is allowed + * (0 = no, 1 = yes, -1 = do not change) + * @basic_rates: basic rates in IEEE 802.11 format + * (or NULL for no change) + * @basic_rates_len: number of basic rates + * @ap_isolate: do not forward packets between connected stations + * @ht_opmode: HT Operation mode + * (u16 = opmode, -1 = do not change) + * @p2p_ctwindow: P2P CT Window (-1 = no change) + * @p2p_opp_ps: P2P opportunistic PS (-1 = no change) + */ +struct bss_parameters { + int use_cts_prot; + int use_short_preamble; + int use_short_slot_time; + const u8 *basic_rates; + u8 basic_rates_len; + int ap_isolate; + int ht_opmode; + s8 p2p_ctwindow, p2p_opp_ps; +}; + +/** + * struct mesh_config - 802.11s mesh configuration + * + * These parameters can be changed while the mesh is active. + * + * @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used + * by the Mesh Peering Open message + * @dot11MeshConfirmTimeout: the initial retry timeout in millisecond units + * used by the Mesh Peering Open message + * @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by + * the mesh peering management to close a mesh peering + * @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this + * mesh interface + * @dot11MeshMaxRetries: the maximum number of peer link open retries that can + * be sent to establish a new peer link instance in a mesh + * @dot11MeshTTL: the value of TTL field set at a source mesh STA + * @element_ttl: the value of TTL field set at a mesh STA for path selection + * elements + * @auto_open_plinks: whether we should automatically open peer links when we + * detect compatible mesh peers + * @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to + * synchronize to for 11s default synchronization method + * @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ + * that an originator mesh STA can send to a particular path target + * @path_refresh_time: how frequently to refresh mesh paths in milliseconds + * @min_discovery_timeout: the minimum length of time to wait until giving up on + * a path discovery in milliseconds + * @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs + * receiving a PREQ shall consider the forwarding information from the + * root to be valid. (TU = time unit) + * @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during + * which a mesh STA can send only one action frame containing a PREQ + * element + * @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during + * which a mesh STA can send only one Action frame containing a PERR + * element + * @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that + * it takes for an HWMP information element to propagate across the mesh + * @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA + * @dot11MeshHWMPRannInterval: the interval of time (in TUs) between root + * announcements are transmitted + * @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh + * station has access to a broader network beyond the MBSS. 
(This is + * misnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true + * only means that the station will announce to others that it is a mesh gate, but + * not necessarily using the gate announcement protocol. Still keeping the + * same nomenclature to be in sync with the spec) + * @dot11MeshForwarding: whether the Mesh STA is forwarding or non-forwarding + * entity (default is TRUE - forwarding entity) + * @rssi_threshold: the threshold for average signal strength of candidate + * station to establish a peer link + * @ht_opmode: mesh HT protection mode + * + * @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs + * receiving a proactive PREQ shall consider the forwarding information to + * the root mesh STA to be valid. + * + * @dot11MeshHWMProotInterval: The interval of time (in TUs) between proactive + * PREQs are transmitted. + * @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs) + * during which a mesh STA can send only one Action frame containing + * a PREQ element for root path confirmation. + * @power_mode: The default mesh power save mode which will be the initial + * setting for new peer links. + * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake + * after transmitting its beacon. + * @plink_timeout: If no tx activity is seen from a STA we've established + * peering with for longer than this time (in seconds), then remove it + * from the STA's list of peers. Default is 30 minutes. + */ +struct mesh_config { + u16 dot11MeshRetryTimeout; + u16 dot11MeshConfirmTimeout; + u16 dot11MeshHoldingTimeout; + u16 dot11MeshMaxPeerLinks; + u8 dot11MeshMaxRetries; + u8 dot11MeshTTL; + u8 element_ttl; + bool auto_open_plinks; + u32 dot11MeshNbrOffsetMaxNeighbor; + u8 dot11MeshHWMPmaxPREQretries; + u32 path_refresh_time; + u16 min_discovery_timeout; + u32 dot11MeshHWMPactivePathTimeout; + u16 dot11MeshHWMPpreqMinInterval; + u16 dot11MeshHWMPperrMinInterval; + u16 dot11MeshHWMPnetDiameterTraversalTime; + u8 dot11MeshHWMPRootMode; + u16 dot11MeshHWMPRannInterval; + bool dot11MeshGateAnnouncementProtocol; + bool dot11MeshForwarding; + s32 rssi_threshold; + u16 ht_opmode; + u32 dot11MeshHWMPactivePathToRootTimeout; + u16 dot11MeshHWMProotInterval; + u16 dot11MeshHWMPconfirmationInterval; + enum nl80211_mesh_power_mode power_mode; + u16 dot11MeshAwakeWindowDuration; + u32 plink_timeout; +}; + +/** + * struct mesh_setup - 802.11s mesh setup configuration + * @chandef: defines the channel to use + * @mesh_id: the mesh ID + * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes + * @sync_method: which synchronization method to use + * @path_sel_proto: which path selection protocol to use + * @path_metric: which metric to use + * @auth_id: which authentication method this mesh is using + * @ie: vendor information elements (optional) + * @ie_len: length of vendor information elements + * @is_authenticated: this mesh requires authentication + * @is_secure: this mesh uses security + * @user_mpm: userspace handles all MPM functions + * @dtim_period: DTIM period to use + * @beacon_interval: beacon interval to use + * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] + * @basic_rates: basic rates to use when creating the mesh + * + * These parameters are fixed when the mesh is created.
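+ *
+ * Illustrative fragment only; the mesh ID string, channel definition and
+ * timing values below are made-up examples, not recommended settings:
+ *
+ *	struct mesh_setup setup = {
+ *		.chandef	 = chandef,
+ *		.mesh_id	 = (const u8 *)"example-mesh",
+ *		.mesh_id_len	 = 12,
+ *		.beacon_interval = 100,
+ *		.dtim_period	 = 2,
+ *	};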
+ */ +struct mesh_setup { + struct cfg80211_chan_def chandef; + const u8 *mesh_id; + u8 mesh_id_len; + u8 sync_method; + u8 path_sel_proto; + u8 path_metric; + u8 auth_id; + const u8 *ie; + u8 ie_len; + bool is_authenticated; + bool is_secure; + bool user_mpm; + u8 dtim_period; + u16 beacon_interval; + int mcast_rate[IEEE80211_NUM_BANDS]; + u32 basic_rates; +}; + +/** + * struct ieee80211_txq_params - TX queue parameters + * @ac: AC identifier + * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled + * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range + * 1..32767] + * @cwmax: Maximum contention window [a value of the form 2^n-1 in the range + * 1..32767] + * @aifs: Arbitration interframe space [0..255] + */ +struct ieee80211_txq_params { + enum nl80211_ac ac; + u16 txop; + u16 cwmin; + u16 cwmax; + u8 aifs; +}; + +/** + * DOC: Scanning and BSS list handling + * + * The scanning process itself is fairly simple, but cfg80211 offers quite + * a bit of helper functionality. To start a scan, the scan operation will + * be invoked with a scan definition. This scan definition contains the + * channels to scan, and the SSIDs to send probe requests for (including the + * wildcard, if desired). A passive scan is indicated by having no SSIDs to + * probe. Additionally, a scan request may contain extra information elements + * that should be added to the probe request. The IEs are guaranteed to be + * well-formed, and will not exceed the maximum length the driver advertised + * in the wiphy structure. + * + * When scanning finds a BSS, cfg80211 needs to be notified of that, because + * it is responsible for maintaining the BSS list; the driver should not + * maintain a list itself. For this notification, various functions exist. + * + * Since drivers do not maintain a BSS list, there are also a number of + * functions to search for a BSS and obtain information about it from the + * BSS structure cfg80211 maintains. The BSS list is also made available + * to userspace. + */ + +/** + * struct cfg80211_ssid - SSID description + * @ssid: the SSID + * @ssid_len: length of the ssid + */ +struct cfg80211_ssid { + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; +}; + +/** + * struct cfg80211_scan_request - scan request description + * + * @ssids: SSIDs to scan for (active scan only) + * @n_ssids: number of SSIDs + * @channels: channels to scan on. 
+ * @n_channels: total number of channels to scan + * @scan_width: channel width for scanning + * @ie: optional information element(s) to add into Probe Request or %NULL + * @ie_len: length of ie in octets + * @flags: bit field of flags controlling operation + * @rates: bitmap of rates to advertise for each band + * @wiphy: the wiphy this was for + * @scan_start: time (in jiffies) when the scan started + * @wdev: the wireless device to scan for + * @aborted: (internal) scan request was notified as aborted + * @notified: (internal) scan request was notified as done or aborted + * @no_cck: used to send probe requests at non-CCK rate in 2GHz band + */ +struct cfg80211_scan_request { + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + enum nl80211_bss_scan_width scan_width; + const u8 *ie; + size_t ie_len; + u32 flags; + + u32 rates[IEEE80211_NUM_BANDS]; + + struct wireless_dev *wdev; + + /* internal */ + struct wiphy *wiphy; + unsigned long scan_start; + bool aborted, notified; + bool no_cck; + + /* keep last */ + struct ieee80211_channel *channels[0]; +}; + +/** + * struct cfg80211_match_set - sets of attributes to match + * + * @ssid: SSID to be matched; may be zero-length for no match (RSSI only) + * @rssi_thold: don't report scan results below this threshold (in s32 dBm) + */ +struct cfg80211_match_set { + struct cfg80211_ssid ssid; + s32 rssi_thold; +}; + +/** + * struct cfg80211_sched_scan_request - scheduled scan request description + * + * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) + * @n_ssids: number of SSIDs + * @n_channels: total number of channels to scan + * @scan_width: channel width for scanning + * @interval: interval between each scheduled scan cycle + * @ie: optional information element(s) to add into Probe Request or %NULL + * @ie_len: length of ie in octets + * @flags: bit field of flags controlling operation + * @match_sets: sets of parameters to be matched for a scan result + * entry to be considered valid and to be passed to the host + * (others are filtered out). + * If omitted, all results are passed.
+ * @n_match_sets: number of match sets + * @wiphy: the wiphy this was for + * @dev: the interface + * @scan_start: start time of the scheduled scan + * @channels: channels to scan + * @min_rssi_thold: for drivers only supporting a single threshold, this + * contains the minimum over all matchsets + */ +struct cfg80211_sched_scan_request { + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + enum nl80211_bss_scan_width scan_width; + u32 interval; + const u8 *ie; + size_t ie_len; + u32 flags; + struct cfg80211_match_set *match_sets; + int n_match_sets; + s32 min_rssi_thold; + + /* internal */ + struct wiphy *wiphy; + struct net_device *dev; + unsigned long scan_start; + + /* keep last */ + struct ieee80211_channel *channels[0]; +}; + +/** + * enum cfg80211_signal_type - signal type + * + * @CFG80211_SIGNAL_TYPE_NONE: no signal strength information available + * @CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) + * @CFG80211_SIGNAL_TYPE_UNSPEC: signal strength, increasing from 0 through 100 + */ +enum cfg80211_signal_type { + CFG80211_SIGNAL_TYPE_NONE, + CFG80211_SIGNAL_TYPE_MBM, + CFG80211_SIGNAL_TYPE_UNSPEC, +}; + +/** + * struct cfg80211_bss_ies - BSS entry IE data + * @tsf: TSF contained in the frame that carried these IEs + * @rcu_head: internal use, for freeing + * @len: length of the IEs + * @data: IE data + */ +struct cfg80211_bss_ies { + u64 tsf; + struct rcu_head rcu_head; + int len; + u8 data[]; +}; + +/** + * struct cfg80211_bss - BSS description + * + * This structure describes a BSS (which may also be a mesh network) + * for use in scan results and similar. + * + * @channel: channel this BSS is on + * @scan_width: width of the control channel + * @bssid: BSSID of the BSS + * @beacon_interval: the beacon interval as from the frame + * @capability: the capability field in host byte order + * @ies: the information elements (Note that there is no guarantee that these + * are well-formed!); this is a pointer to either the beacon_ies or + * proberesp_ies depending on whether a Probe Response frame has been + * received. It is always non-%NULL. + * @beacon_ies: the information elements from the last Beacon frame + * (implementation note: if @hidden_beacon_bss is set this struct doesn't + * own the beacon_ies, but they're just pointers to the ones from the + * @hidden_beacon_bss struct) + * @proberesp_ies: the information elements from the last Probe Response frame + * @hidden_beacon_bss: in case this BSS struct represents a probe response from + * a BSS that hides the SSID in its beacon, this points to the BSS struct + * that holds the beacon data. @beacon_ies is still valid, of course, and + * points to the same data as hidden_beacon_bss->beacon_ies in that case.
+ * @signal: signal strength value (type depends on the wiphy's signal_type) + * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes + */ +struct cfg80211_bss { + struct ieee80211_channel *channel; + enum nl80211_bss_scan_width scan_width; + + const struct cfg80211_bss_ies __rcu *ies; + const struct cfg80211_bss_ies __rcu *beacon_ies; + const struct cfg80211_bss_ies __rcu *proberesp_ies; + + struct cfg80211_bss *hidden_beacon_bss; + + s32 signal; + + u16 beacon_interval; + u16 capability; + + u8 bssid[ETH_ALEN]; + + u8 priv[0] __aligned(sizeof(void *)); +}; + +/** + * ieee80211_bss_get_ie - find IE with given ID + * @bss: the bss to search + * @ie: the IE ID + * + * Note that the return value is an RCU-protected pointer, so + * rcu_read_lock() must be held when calling this function. + * Return: %NULL if not found. + */ +const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie); + + +/** + * struct cfg80211_auth_request - Authentication request data + * + * This structure provides information needed to complete IEEE 802.11 + * authentication. + * + * @bss: The BSS to authenticate with, the callee must obtain a reference + * to it if it needs to keep it. + * @auth_type: Authentication type (algorithm) + * @ie: Extra IEs to add to Authentication frame or %NULL + * @ie_len: Length of ie buffer in octets + * @key_len: length of WEP key for shared key authentication + * @key_idx: index of WEP key for shared key authentication + * @key: WEP key for shared key authentication + * @sae_data: Non-IE data to use with SAE or %NULL. This starts with + * Authentication transaction sequence number field. + * @sae_data_len: Length of sae_data buffer in octets + */ +struct cfg80211_auth_request { + struct cfg80211_bss *bss; + const u8 *ie; + size_t ie_len; + enum nl80211_auth_type auth_type; + const u8 *key; + u8 key_len, key_idx; + const u8 *sae_data; + size_t sae_data_len; +}; + +/** + * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. + * + * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) + * @ASSOC_REQ_DISABLE_VHT: Disable VHT + */ +enum cfg80211_assoc_req_flags { + ASSOC_REQ_DISABLE_HT = BIT(0), + ASSOC_REQ_DISABLE_VHT = BIT(1), +}; + +/** + * struct cfg80211_assoc_request - (Re)Association request data + * + * This structure provides information needed to complete IEEE 802.11 + * (re)association. + * @bss: The BSS to associate with. If the call is successful the driver is + * given a reference that it must give back to cfg80211_send_rx_assoc() + * or to cfg80211_assoc_timeout(). To ensure proper refcounting, new + * association requests while already associating must be rejected. + * @ie: Extra IEs to add to (Re)Association Request frame or %NULL + * @ie_len: Length of ie buffer in octets + * @use_mfp: Use management frame protection (IEEE 802.11w) in this association + * @crypto: crypto settings + * @prev_bssid: previous BSSID, if not %NULL use reassociate frame + * @flags: See &enum cfg80211_assoc_req_flags + * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask + * will be used in ht_capa. Un-supported values will be ignored. + * @ht_capa_mask: The bits of ht_capa which are to be used. 
+ * @vht_capa: VHT capability override + * @vht_capa_mask: VHT capability mask indicating which fields to use + */ +struct cfg80211_assoc_request { + struct cfg80211_bss *bss; + const u8 *ie, *prev_bssid; + size_t ie_len; + struct cfg80211_crypto_settings crypto; + bool use_mfp; + u32 flags; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa, vht_capa_mask; +}; + +/** + * struct cfg80211_deauth_request - Deauthentication request data + * + * This structure provides information needed to complete IEEE 802.11 + * deauthentication. + * + * @bssid: the BSSID of the BSS to deauthenticate from + * @ie: Extra IEs to add to Deauthentication frame or %NULL + * @ie_len: Length of ie buffer in octets + * @reason_code: The reason code for the deauthentication + * @local_state_change: if set, change local state only and + * do not send a deauth frame + */ +struct cfg80211_deauth_request { + const u8 *bssid; + const u8 *ie; + size_t ie_len; + u16 reason_code; + bool local_state_change; +}; + +/** + * struct cfg80211_disassoc_request - Disassociation request data + * + * This structure provides information needed to complete IEEE 802.11 + * disassociation. + * + * @bss: the BSS to disassociate from + * @ie: Extra IEs to add to Disassociation frame or %NULL + * @ie_len: Length of ie buffer in octets + * @reason_code: The reason code for the disassociation + * @local_state_change: This is a request for a local state only, i.e., no + * Disassociation frame is to be transmitted. + */ +struct cfg80211_disassoc_request { + struct cfg80211_bss *bss; + const u8 *ie; + size_t ie_len; + u16 reason_code; + bool local_state_change; +}; + +/** + * struct cfg80211_ibss_params - IBSS parameters + * + * This structure defines the IBSS parameters for the join_ibss() + * method. + * + * @ssid: The SSID, will always be non-null. + * @ssid_len: The length of the SSID, will always be non-zero. + * @bssid: Fixed BSSID requested, may be %NULL, if set do not + * search for IBSSs with a different BSSID. + * @chandef: defines the channel to use if no other IBSS to join can be found + * @channel_fixed: The channel should be fixed -- do not search for + * IBSSs to join on other channels. + * @ie: information element(s) to include in the beacon + * @ie_len: length of that buffer + * @beacon_interval: beacon interval to use + * @privacy: this is a protected network, keys will be configured + * after joining + * @control_port: whether user space controls IEEE 802.1X port, i.e., + * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is + * required to assume that the port is unauthorized until authorized by + * user space. Otherwise, port is marked authorized by default. + * @userspace_handles_dfs: whether user space controls DFS operation, i.e. + * changes the channel when a radar is detected. This is required + * to operate on DFS channels. + * @basic_rates: bitmap of basic rates to use when creating the IBSS + * @mcast_rate: per-band multicast rate index + 1 (0: disabled) + * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask + * will be used in ht_capa. Un-supported values will be ignored. + * @ht_capa_mask: The bits of ht_capa which are to be used.
+ */ +struct cfg80211_ibss_params { + const u8 *ssid; + const u8 *bssid; + struct cfg80211_chan_def chandef; + const u8 *ie; + u8 ssid_len, ie_len; + u16 beacon_interval; + u32 basic_rates; + bool channel_fixed; + bool privacy; + bool control_port; + bool userspace_handles_dfs; + int mcast_rate[IEEE80211_NUM_BANDS]; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; +}; + +/** + * struct cfg80211_connect_params - Connection parameters + * + * This structure provides information needed to complete IEEE 802.11 + * authentication and association. + * + * @channel: The channel to use or %NULL if not specified (auto-select based + * on scan results) + * @channel_hint: The channel of the recommended BSS for initial connection or + * %NULL if not specified + * @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan + * results) + * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or + * %NULL if not specified. Unlike the @bssid parameter, the driver is + * allowed to ignore this @bssid_hint if it has knowledge of a better BSS + * to use. + * @ssid: SSID + * @ssid_len: Length of ssid in octets + * @auth_type: Authentication type (algorithm) + * @ie: IEs for association request + * @ie_len: Length of assoc_ie in octets + * @privacy: indicates whether privacy-enabled APs should be used + * @mfp: indicate whether management frame protection is used + * @crypto: crypto settings + * @key_len: length of WEP key for shared key authentication + * @key_idx: index of WEP key for shared key authentication + * @key: WEP key for shared key authentication + * @flags: See &enum cfg80211_assoc_req_flags + * @bg_scan_period: Background scan period in seconds + * or -1 to indicate that default value is to be used. + * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask + * will be used in ht_capa. Un-supported values will be ignored. + * @ht_capa_mask: The bits of ht_capa which are to be used. + * @vht_capa: VHT Capability overrides + * @vht_capa_mask: The bits of vht_capa which are to be used. 
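+ *
+ * A minimal sketch, assuming a hypothetical open-system connection (the SSID
+ * below is illustrative only and is not defined by this header); members not
+ * listed stay zeroed:
+ *
+ *	static const u8 example_ssid[] = "example-ap";
+ *	struct cfg80211_connect_params sme = {
+ *		.ssid = example_ssid,
+ *		.ssid_len = sizeof(example_ssid) - 1,
+ *		.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM,
+ *		.bg_scan_period = -1,	/* keep the driver's default period */
+ *	};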
+ */ +struct cfg80211_connect_params { + struct ieee80211_channel *channel; + struct ieee80211_channel *channel_hint; + const u8 *bssid; + const u8 *bssid_hint; + const u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + const u8 *ie; + size_t ie_len; + bool privacy; + enum nl80211_mfp mfp; + struct cfg80211_crypto_settings crypto; + const u8 *key; + u8 key_len, key_idx; + u32 flags; + int bg_scan_period; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; +}; + +/** + * enum wiphy_params_flags - set_wiphy_params bitfield values + * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed + * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed + * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed + * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed + * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed + */ +enum wiphy_params_flags { + WIPHY_PARAM_RETRY_SHORT = 1 << 0, + WIPHY_PARAM_RETRY_LONG = 1 << 1, + WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, + WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, + WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, +}; + +/* + * cfg80211_bitrate_mask - masks for bitrate control + */ +struct cfg80211_bitrate_mask { + struct { + u32 legacy; + u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN]; + u16 vht_mcs[NL80211_VHT_NSS_MAX]; + enum nl80211_txrate_gi gi; + } control[IEEE80211_NUM_BANDS]; +}; +/** + * struct cfg80211_pmksa - PMK Security Association + * + * This structure is passed to the set/del_pmksa() method for PMKSA + * caching. + * + * @bssid: The AP's BSSID. + * @pmkid: The PMK material itself. + */ +struct cfg80211_pmksa { + const u8 *bssid; + const u8 *pmkid; +}; + +/** + * struct cfg80211_pkt_pattern - packet pattern + * @mask: bitmask where to match pattern and where to ignore bytes, + * one bit per byte, in same format as nl80211 + * @pattern: bytes to match where bitmask is 1 + * @pattern_len: length of pattern (in bytes) + * @pkt_offset: packet offset (in bytes) + * + * Internal note: @mask and @pattern are allocated in one chunk of + * memory, free @mask only! + */ +struct cfg80211_pkt_pattern { + const u8 *mask, *pattern; + int pattern_len; + int pkt_offset; +}; + +/** + * struct cfg80211_wowlan_tcp - TCP connection parameters + * + * @sock: (internal) socket for source port allocation + * @src: source IP address + * @dst: destination IP address + * @dst_mac: destination MAC address + * @src_port: source port + * @dst_port: destination port + * @payload_len: data payload length + * @payload: data payload buffer + * @payload_seq: payload sequence stamping configuration + * @data_interval: interval at which to send data packets + * @wake_len: wakeup payload match length + * @wake_data: wakeup payload match data + * @wake_mask: wakeup payload match mask + * @tokens_size: length of the tokens buffer + * @payload_tok: payload token usage configuration + */ +struct cfg80211_wowlan_tcp { + struct socket *sock; + __be32 src, dst; + u16 src_port, dst_port; + u8 dst_mac[ETH_ALEN]; + int payload_len; + const u8 *payload; + struct nl80211_wowlan_tcp_data_seq payload_seq; + u32 data_interval; + u32 wake_len; + const u8 *wake_data, *wake_mask; + u32 tokens_size; + /* must be last, variable member */ + struct nl80211_wowlan_tcp_data_token payload_tok; +}; + +/** + * struct cfg80211_wowlan - Wake on Wireless-LAN support info + * + * This structure defines the enabled WoWLAN triggers for the device. 
+ * @any: wake up on any activity -- special trigger if device continues + * operating as normal during suspend + * @disconnect: wake up if getting disconnected + * @magic_pkt: wake up on receiving magic packet + * @patterns: wake up on receiving packet matching a pattern + * @n_patterns: number of patterns + * @gtk_rekey_failure: wake up on GTK rekey failure + * @eap_identity_req: wake up on EAP identity request packet + * @four_way_handshake: wake up on 4-way handshake + * @rfkill_release: wake up when rfkill is released + * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h. + * NULL if not configured. + */ +struct cfg80211_wowlan { + bool any, disconnect, magic_pkt, gtk_rekey_failure, + eap_identity_req, four_way_handshake, + rfkill_release; + struct cfg80211_pkt_pattern *patterns; + struct cfg80211_wowlan_tcp *tcp; + int n_patterns; +}; + +/** + * struct cfg80211_coalesce_rules - Coalesce rule parameters + * + * This structure defines coalesce rule for the device. + * @delay: maximum coalescing delay in msecs. + * @condition: condition for packet coalescence. + * see &enum nl80211_coalesce_condition. + * @patterns: array of packet patterns + * @n_patterns: number of patterns + */ +struct cfg80211_coalesce_rules { + int delay; + enum nl80211_coalesce_condition condition; + struct cfg80211_pkt_pattern *patterns; + int n_patterns; +}; + +/** + * struct cfg80211_coalesce - Packet coalescing settings + * + * This structure defines coalescing settings. + * @rules: array of coalesce rules + * @n_rules: number of rules + */ +struct cfg80211_coalesce { + struct cfg80211_coalesce_rules *rules; + int n_rules; +}; + +/** + * struct cfg80211_wowlan_wakeup - wakeup report + * @disconnect: woke up by getting disconnected + * @magic_pkt: woke up by receiving magic packet + * @gtk_rekey_failure: woke up by GTK rekey failure + * @eap_identity_req: woke up by EAP identity request packet + * @four_way_handshake: woke up by 4-way handshake + * @rfkill_release: woke up by rfkill being released + * @pattern_idx: pattern that caused wakeup, -1 if not due to pattern + * @packet_present_len: copied wakeup packet data + * @packet_len: original wakeup packet length + * @packet: The packet causing the wakeup, if any. + * @packet_80211: For pattern match, magic packet and other data + * frame triggers an 802.3 frame should be reported, for + * disconnect due to deauth 802.11 frame. This indicates which + * it is. 
+ * @tcp_match: TCP wakeup packet received + * @tcp_connlost: TCP connection lost or failed to establish + * @tcp_nomoretokens: TCP data ran out of tokens + */ +struct cfg80211_wowlan_wakeup { + bool disconnect, magic_pkt, gtk_rekey_failure, + eap_identity_req, four_way_handshake, + rfkill_release, packet_80211, + tcp_match, tcp_connlost, tcp_nomoretokens; + s32 pattern_idx; + u32 packet_present_len, packet_len; + const void *packet; +}; + +/** + * struct cfg80211_gtk_rekey_data - rekey data + * @kek: key encryption key + * @kck: key confirmation key + * @replay_ctr: replay counter + */ +struct cfg80211_gtk_rekey_data { + u8 kek[NL80211_KEK_LEN]; + u8 kck[NL80211_KCK_LEN]; + u8 replay_ctr[NL80211_REPLAY_CTR_LEN]; +}; + +/** + * struct cfg80211_update_ft_ies_params - FT IE Information + * + * This structure provides information needed to update the fast transition IE + * + * @md: The Mobility Domain ID, 2 Octet value + * @ie: Fast Transition IEs + * @ie_len: Length of ie in octets + */ +struct cfg80211_update_ft_ies_params { + u16 md; + const u8 *ie; + size_t ie_len; +}; + +/** + * struct cfg80211_mgmt_tx_params - mgmt tx parameters + * + * This structure provides information needed to transmit a mgmt frame + * + * @chan: channel to use + * @offchan: indicates whether off-channel operation is required + * @wait: duration for ROC + * @buf: buffer to transmit + * @len: buffer length + * @no_cck: don't use CCK rates for this frame + * @dont_wait_for_ack: tells the low level not to wait for an ack + * @n_csa_offsets: length of csa_offsets array + * @csa_offsets: array of all the csa offsets in the frame + */ +struct cfg80211_mgmt_tx_params { + struct ieee80211_channel *chan; + bool offchan; + unsigned int wait; + const u8 *buf; + size_t len; + bool no_cck; + bool dont_wait_for_ack; + int n_csa_offsets; + const u16 *csa_offsets; +}; + +/** + * struct cfg80211_dscp_exception - DSCP exception + * + * @dscp: DSCP value that does not adhere to the user priority range definition + * @up: user priority value to which the corresponding DSCP value belongs + */ +struct cfg80211_dscp_exception { + u8 dscp; + u8 up; +}; + +/** + * struct cfg80211_dscp_range - DSCP range definition for user priority + * + * @low: lowest DSCP value of this user priority range, inclusive + * @high: highest DSCP value of this user priority range, inclusive + */ +struct cfg80211_dscp_range { + u8 low; + u8 high; +}; + +/* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */ +#define IEEE80211_QOS_MAP_MAX_EX 21 +#define IEEE80211_QOS_MAP_LEN_MIN 16 +#define IEEE80211_QOS_MAP_LEN_MAX \ + (IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX) + +/** + * struct cfg80211_qos_map - QoS Map Information + * + * This struct defines the Interworking QoS map setting for DSCP values + * + * @num_des: number of DSCP exceptions (0..21) + * @dscp_exception: optionally up to a maximum of 21 DSCP exceptions from + * the user priority DSCP range definition + * @up: DSCP range definition for a particular user priority + */ +struct cfg80211_qos_map { + u8 num_des; + struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX]; + struct cfg80211_dscp_range up[8]; +}; /** * struct cfg80211_ops - backend description for wireless configuration @@ -179,11 +2082,24 @@ struct wiphy; * wireless extensions but this is subject to reevaluation as soon as this * code is used more widely and we have a first user without wext.
* - * @add_virtual_intf: create a new virtual interface with the given name + * @suspend: wiphy device needs to be suspended. The variable @wow will + * be %NULL or contain the enabled Wake-on-Wireless triggers that are + * configured for the device. + * @resume: wiphy device needs to be resumed + * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback + * to call device_set_wakeup_enable() to enable/disable wakeup from + * the device. + * + * @add_virtual_intf: create a new virtual interface with the given name, + * must set the struct wireless_dev's iftype. Beware: You must create + * the new netdev in the wiphy's network namespace! Returns the struct + * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must + * also set the address member in the wdev. * - * @del_virtual_intf: remove the virtual interface determined by ifindex. + * @del_virtual_intf: remove the virtual interface * - * @change_virtual_intf: change type of virtual interface + * @change_virtual_intf: change type/configuration of virtual interface, + * keep the struct wireless_dev's iftype updated. * * @add_key: add a key with the given parameters. @mac_addr will be %NULL * when adding a group key. @@ -191,60 +2107,2786 @@ struct wiphy; * @get_key: get information about the key with the given parameters. * @mac_addr will be %NULL when requesting information for a group * key. All pointers given to the @callback function need not be valid - * after it returns. + * after it returns. This function should return an error if it is + * not possible to retrieve the key, -ENOENT if it doesn't exist. * * @del_key: remove a key given the @mac_addr (%NULL for a group key) - * and @key_index + * and @key_index, return -ENOENT if the key doesn't exist. * * @set_default_key: set the default key on an interface * - * @add_beacon: Add a beacon with given parameters, @head, @interval - * and @dtim_period will be valid, @tail is optional. - * @set_beacon: Change the beacon parameters for an access point mode - * interface. This should reject the call when no beacon has been - * configured. - * @del_beacon: Remove beacon configuration and stop sending the beacon. + * @set_default_mgmt_key: set the default management frame key on an interface * - * @add_station: Add a new station. + * @set_rekey_data: give the data necessary for GTK rekeying to the driver + * + * @start_ap: Start acting in AP mode defined by the parameters. + * @change_beacon: Change the beacon parameters for an access point mode + * interface. This should reject the call when AP mode wasn't started. + * @stop_ap: Stop being an AP, including stopping beaconing. * + * @add_station: Add a new station. * @del_station: Remove a station; @mac may be NULL to remove all stations. + * @change_station: Modify a given station. Note that flags changes are not much + * validated in cfg80211, in particular the auth/assoc/authorized flags + * might come to the driver in invalid combinations -- make sure to check + * them, also against the existing state! Drivers must call + * cfg80211_check_station_change() to validate the information. 
+ * @get_station: get station information for the station identified by @mac + * @dump_station: dump station callback -- resume dump at index @idx + * + * @add_mpath: add a fixed mesh path + * @del_mpath: delete a given mesh path + * @change_mpath: change a given mesh path + * @get_mpath: get a mesh path for the given parameters + * @dump_mpath: dump mesh path callback -- resume dump at index @idx + * @join_mesh: join the mesh network with the specified parameters + * (invoked with the wireless_dev mutex held) + * @leave_mesh: leave the current mesh network + * (invoked with the wireless_dev mutex held) + * + * @get_mesh_config: Get the current mesh configuration + * + * @update_mesh_config: Update mesh parameters on a running mesh. + * The mask is a bitfield which tells us which parameters to + * set, and which to leave alone. * - * @change_station: Modify a given station. + * @change_bss: Modify parameters for a given BSS. + * + * @set_txq_params: Set TX queue parameters + * + * @libertas_set_mesh_channel: Only for backward compatibility for libertas, + * as it doesn't implement join_mesh and needs to set the channel to + * join the mesh instead. + * + * @set_monitor_channel: Set the monitor mode channel for the device. If other + * interfaces are active this callback should reject the configuration. + * If no interfaces are active or the device is down, the channel should + * be stored for when a monitor interface becomes active. + * + * @scan: Request to do a scan. If returning zero, the scan request is given + * the driver, and will be valid until passed to cfg80211_scan_done(). + * For scan results, call cfg80211_inform_bss(); you can call this outside + * the scan/scan_done bracket too. + * + * @auth: Request to authenticate with the specified peer + * (invoked with the wireless_dev mutex held) + * @assoc: Request to (re)associate with the specified peer + * (invoked with the wireless_dev mutex held) + * @deauth: Request to deauthenticate from the specified peer + * (invoked with the wireless_dev mutex held) + * @disassoc: Request to disassociate from the specified peer + * (invoked with the wireless_dev mutex held) + * + * @connect: Connect to the ESS with the specified parameters. When connected, + * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. + * If the connection fails for some reason, call cfg80211_connect_result() + * with the status from the AP. + * (invoked with the wireless_dev mutex held) + * @disconnect: Disconnect from the BSS/ESS. + * (invoked with the wireless_dev mutex held) + * + * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call + * cfg80211_ibss_joined(), also call that function when changing BSSID due + * to a merge. + * (invoked with the wireless_dev mutex held) + * @leave_ibss: Leave the IBSS. + * (invoked with the wireless_dev mutex held) + * + * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or + * MESH mode) + * + * @set_wiphy_params: Notify that wiphy parameters have changed; + * @changed bitfield (see &enum wiphy_params_flags) describes which values + * have changed. The actual parameter values are available in + * struct wiphy. If returning an error, no value should be changed. + * + * @set_tx_power: set the transmit power according to the parameters, + * the power passed is in mBm, to get dBm use MBM_TO_DBM(). 
The + * wdev may be %NULL if power was set for the wiphy, and will + * always be %NULL unless the driver supports per-vif TX power + * (as advertised by the nl80211 feature flag.) + * @get_tx_power: store the current TX power into the dbm variable; + * return 0 if successful + * + * @set_wds_peer: set the WDS peer for a WDS interface + * + * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting + * functions to adjust rfkill hw state + * + * @dump_survey: get site survey information. + * + * @remain_on_channel: Request the driver to remain awake on the specified + * channel for the specified duration to complete an off-channel + * operation (e.g., public action frame exchange). When the driver is + * ready on the requested channel, it must indicate this with an event + * notification by calling cfg80211_ready_on_channel(). + * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. + * This allows the operation to be terminated prior to timeout based on + * the duration value. + * @mgmt_tx: Transmit a management frame. + * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management + * frame on another channel + * + * @testmode_cmd: run a test mode command; @wdev may be %NULL + * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be + * used by the function, but 0 and 1 must not be touched. Additionally, + * return error codes other than -ENOBUFS and -ENOENT will terminate the + * dump and return to userspace with an error, so be careful. If any data + * was passed in from userspace then the data/len arguments will be present + * and point to the data contained in %NL80211_ATTR_TESTDATA. + * + * @set_bitrate_mask: set the bitrate mask configuration + * + * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac + * devices running firmwares capable of generating the (re) association + * RSN IE. It allows for faster roaming between WPA2 BSSIDs. + * @del_pmksa: Delete a cached PMKID. + * @flush_pmksa: Flush all cached PMKIDs. + * @set_power_mgmt: Configure WLAN power management. A timeout value of -1 + * allows the driver to adjust the dynamic ps timeout value. + * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. + * @set_cqm_txe_config: Configure connection quality monitor TX error + * thresholds. + * @sched_scan_start: Tell the driver to start a scheduled scan. + * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan. This + * call must stop the scheduled scan and be ready for starting a new one + * before it returns, i.e. @sched_scan_start may be called immediately + * after that again and should not fail in that case. The driver should + * not call cfg80211_sched_scan_stopped() for a requested stop (when this + * method returns 0.) + * + * @mgmt_frame_register: Notify driver that a management frame type was + * registered. Note that this callback may not sleep, and cannot run + * concurrently with itself. + * + * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. + * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may + * reject TX/RX mask combinations they cannot support by returning -EINVAL + * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX). + * + * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant). + * + * @set_ringparam: Set tx and rx ring sizes. + * + * @get_ringparam: Get tx and rx ring current and maximum sizes. + * + * @tdls_mgmt: Transmit a TDLS management frame. 
+ * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup). + * + * @probe_client: probe an associated client, must return a cookie that it + * later passes to cfg80211_probe_status(). + * + * @set_noack_map: Set the NoAck Map for the TIDs. + * + * @get_et_sset_count: Ethtool API to get string-set count. + * See @ethtool_ops.get_sset_count + * + * @get_et_stats: Ethtool API to get a set of u64 stats. + * See @ethtool_ops.get_ethtool_stats + * + * @get_et_strings: Ethtool API to get a set of strings to describe stats + * and perhaps other supported types of ethtool data-sets. + * See @ethtool_ops.get_strings + * + * @get_channel: Get the current operating channel for the virtual interface. + * For monitor interfaces, it should return %NULL unless there's a single + * current monitoring channel. + * + * @start_p2p_device: Start the given P2P device. + * @stop_p2p_device: Stop the given P2P device. + * + * @set_mac_acl: Sets MAC address control list in AP and P2P GO mode. + * Parameters include ACL policy, an array of MAC addresses of stations + * and the number of MAC addresses. If there is already a list in the driver, + * this new list replaces the existing one. The driver has to clear its ACL + * when the number of MAC address entries passed is 0. Drivers which + * advertise the support for MAC based ACL have to implement this callback. + * + * @start_radar_detection: Start radar detection in the driver. + * + * @update_ft_ies: Provide updated Fast BSS Transition information to the + * driver. If the SME is in the driver/firmware, this information can be + * used in building Authentication and Reassociation Request frames. + * + * @crit_proto_start: Indicates a critical protocol needs more link reliability + * for a given duration (milliseconds). The protocol is provided so the + * driver can take the most appropriate actions. + * @crit_proto_stop: Indicates critical protocol no longer needs increased link + * reliability. This operation cannot fail. + * @set_coalesce: Set coalesce parameters. + * + * @channel_switch: initiate channel-switch procedure (with CSA) + * + * @set_qos_map: Set QoS mapping information to the driver + * + * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the + * given interface. This is used e.g. for dynamic HT 20/40 MHz channel width + * changes during the lifetime of the BSS.
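+ *
+ * As a hedged illustration only (the example_* handlers below are
+ * hypothetical driver functions, not part of cfg80211), a minimal fullmac
+ * driver would typically implement just the callbacks it supports and leave
+ * the remaining members %NULL:
+ *
+ *	static const struct cfg80211_ops example_cfg80211_ops = {
+ *		.scan = example_scan,
+ *		.connect = example_connect,
+ *		.disconnect = example_disconnect,
+ *		.add_key = example_add_key,
+ *		.del_key = example_del_key,
+ *	};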
*/ struct cfg80211_ops { - int (*add_virtual_intf)(struct wiphy *wiphy, char *name, - enum nl80211_iftype type); - int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex); - int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex, - enum nl80211_iftype type); + int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); + int (*resume)(struct wiphy *wiphy); + void (*set_wakeup)(struct wiphy *wiphy, bool enabled); + + struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy, + const char *name, + enum nl80211_iftype type, + u32 *flags, + struct vif_params *params); + int (*del_virtual_intf)(struct wiphy *wiphy, + struct wireless_dev *wdev); + int (*change_virtual_intf)(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, - u8 key_index, u8 *mac_addr, + u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params); int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, - u8 key_index, u8 *mac_addr, void *cookie, + u8 key_index, bool pairwise, const u8 *mac_addr, + void *cookie, void (*callback)(void *cookie, struct key_params*)); int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, - u8 key_index, u8 *mac_addr); + u8 key_index, bool pairwise, const u8 *mac_addr); int (*set_default_key)(struct wiphy *wiphy, struct net_device *netdev, - u8 key_index); + u8 key_index, bool unicast, bool multicast); + int (*set_default_mgmt_key)(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index); - int (*add_beacon)(struct wiphy *wiphy, struct net_device *dev, - struct beacon_parameters *info); - int (*set_beacon)(struct wiphy *wiphy, struct net_device *dev, - struct beacon_parameters *info); - int (*del_beacon)(struct wiphy *wiphy, struct net_device *dev); + int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ap_settings *settings); + int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_beacon_data *info); + int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev); int (*add_station)(struct wiphy *wiphy, struct net_device *dev, - u8 *mac, struct station_parameters *params); + const u8 *mac, + struct station_parameters *params); int (*del_station)(struct wiphy *wiphy, struct net_device *dev, - u8 *mac); + const u8 *mac); int (*change_station)(struct wiphy *wiphy, struct net_device *dev, - u8 *mac, struct station_parameters *params); + const u8 *mac, + struct station_parameters *params); int (*get_station)(struct wiphy *wiphy, struct net_device *dev, - u8 *mac, struct station_stats *stats); + const u8 *mac, struct station_info *sinfo); + int (*dump_station)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo); + + int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev, + const u8 *dst, const u8 *next_hop); + int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev, + const u8 *dst); + int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev, + const u8 *dst, const u8 *next_hop); + int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop, struct mpath_info *pinfo); + int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *next_hop, + struct mpath_info *pinfo); + int (*get_mesh_config)(struct wiphy *wiphy, + struct net_device *dev, + struct mesh_config *conf); + int (*update_mesh_config)(struct wiphy *wiphy, + struct net_device *dev, u32 mask, + const 
struct mesh_config *nconf); + int (*join_mesh)(struct wiphy *wiphy, struct net_device *dev, + const struct mesh_config *conf, + const struct mesh_setup *setup); + int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev); + + int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, + struct bss_parameters *params); + + int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_txq_params *params); + + int (*libertas_set_mesh_channel)(struct wiphy *wiphy, + struct net_device *dev, + struct ieee80211_channel *chan); + + int (*set_monitor_channel)(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef); + + int (*scan)(struct wiphy *wiphy, + struct cfg80211_scan_request *request); + + int (*auth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_auth_request *req); + int (*assoc)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req); + int (*deauth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_deauth_request *req); + int (*disassoc)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_disassoc_request *req); + + int (*connect)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme); + int (*disconnect)(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code); + + int (*join_ibss)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params); + int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev); + + int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev, + int rate[IEEE80211_NUM_BANDS]); + + int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed); + + int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, int mbm); + int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, + int *dbm); + + int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev, + const u8 *addr); + + void (*rfkill_poll)(struct wiphy *wiphy); + +#ifdef CONFIG_NL80211_TESTMODE + int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev, + void *data, int len); + int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len); +#endif + + int (*set_bitrate_mask)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *peer, + const struct cfg80211_bitrate_mask *mask); + + int (*dump_survey)(struct wiphy *wiphy, struct net_device *netdev, + int idx, struct survey_info *info); + + int (*set_pmksa)(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa); + int (*del_pmksa)(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa); + int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev); + + int (*remain_on_channel)(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct ieee80211_channel *chan, + unsigned int duration, + u64 *cookie); + int (*cancel_remain_on_channel)(struct wiphy *wiphy, + struct wireless_dev *wdev, + u64 cookie); + + int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, + u64 *cookie); + int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy, + struct wireless_dev *wdev, + u64 cookie); + + int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout); + + int (*set_cqm_rssi_config)(struct wiphy *wiphy, + struct net_device *dev, + s32 rssi_thold, u32 rssi_hyst); + + int (*set_cqm_txe_config)(struct wiphy *wiphy, + struct net_device *dev, + u32 rate, u32 
pkts, u32 intvl); + + void (*mgmt_frame_register)(struct wiphy *wiphy, + struct wireless_dev *wdev, + u16 frame_type, bool reg); + + int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant); + int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant); + + int (*set_ringparam)(struct wiphy *wiphy, u32 tx, u32 rx); + void (*get_ringparam)(struct wiphy *wiphy, + u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max); + + int (*sched_scan_start)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request); + int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev); + + int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_gtk_rekey_data *data); + + int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, + const u8 *buf, size_t len); + int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper); + + int (*probe_client)(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u64 *cookie); + + int (*set_noack_map)(struct wiphy *wiphy, + struct net_device *dev, + u16 noack_map); + + int (*get_et_sset_count)(struct wiphy *wiphy, + struct net_device *dev, int sset); + void (*get_et_stats)(struct wiphy *wiphy, struct net_device *dev, + struct ethtool_stats *stats, u64 *data); + void (*get_et_strings)(struct wiphy *wiphy, struct net_device *dev, + u32 sset, u8 *data); + + int (*get_channel)(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef); + + int (*start_p2p_device)(struct wiphy *wiphy, + struct wireless_dev *wdev); + void (*stop_p2p_device)(struct wiphy *wiphy, + struct wireless_dev *wdev); + + int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev, + const struct cfg80211_acl_data *params); + + int (*start_radar_detection)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_chan_def *chandef, + u32 cac_time_ms); + int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_update_ft_ies_params *ftie); + int (*crit_proto_start)(struct wiphy *wiphy, + struct wireless_dev *wdev, + enum nl80211_crit_proto_id protocol, + u16 duration); + void (*crit_proto_stop)(struct wiphy *wiphy, + struct wireless_dev *wdev); + int (*set_coalesce)(struct wiphy *wiphy, + struct cfg80211_coalesce *coalesce); + + int (*channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_csa_settings *params); + + int (*set_qos_map)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_qos_map *qos_map); + + int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_chan_def *chandef); +}; + +/* + * wireless hardware and networking interfaces structures + * and registration/helper functions + */ + +/** + * enum wiphy_flags - wiphy capability flags + * + * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this + * wiphy at all + * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled + * by default -- this flag will be set depending on the kernel's default + * on wiphy_new(), but can be changed by the driver if it has a good + * reason to override the default + * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station + * on a VLAN interface) + * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station + * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the + * control port 
protocol ethertype. The device also honours the + * control_port_no_encrypt flag. + * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. + * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing + * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. + * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans. + * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the + * firmware. + * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP. + * @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation. + * @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z) + * link setup/discovery operations internally. Setup, discovery and + * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT + * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be + * used for asking the driver/firmware to perform a TDLS operation. + * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME + * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes + * when there are virtual interfaces in AP mode by calling + * cfg80211_report_obss_beacon(). + * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device + * responds to probe-requests in hardware. + * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. + * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. + * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. + * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in + * beaconing mode (AP, IBSS, Mesh, ...). + */ +enum wiphy_flags { + /* use hole at 0 */ + /* use hole at 1 */ + /* use hole at 2 */ + WIPHY_FLAG_NETNS_OK = BIT(3), + WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), + WIPHY_FLAG_4ADDR_AP = BIT(5), + WIPHY_FLAG_4ADDR_STATION = BIT(6), + WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), + WIPHY_FLAG_IBSS_RSN = BIT(8), + WIPHY_FLAG_MESH_AUTH = BIT(10), + WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11), + /* use hole at 12 */ + WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), + WIPHY_FLAG_AP_UAPSD = BIT(14), + WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), + WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16), + WIPHY_FLAG_HAVE_AP_SME = BIT(17), + WIPHY_FLAG_REPORTS_OBSS = BIT(18), + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), + WIPHY_FLAG_OFFCHAN_TX = BIT(20), + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), + WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), + WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), +}; + +/** + * struct ieee80211_iface_limit - limit on certain interface types + * @max: maximum number of interfaces of these types + * @types: interface types (bits) + */ +struct ieee80211_iface_limit { + u16 max; + u16 types; +}; + +/** + * struct ieee80211_iface_combination - possible interface combination + * @limits: limits for the given interface types + * @n_limits: number of limitations + * @num_different_channels: can use up to this many different channels + * @max_interfaces: maximum number of interfaces in total allowed in this + * group + * @beacon_int_infra_match: In this combination, the beacon intervals + * between infrastructure and AP types must match. This is required + * only in special cases. + * @radar_detect_widths: bitmap of channel widths supported for radar detection + * @radar_detect_regions: bitmap of regions supported for radar detection + * + * With this structure the driver can describe which interface + * combinations it supports concurrently. + * + * Examples: + * + * 1. 
Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total: + * + * struct ieee80211_iface_limit limits1[] = { + * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, + * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, }, + * }; + * struct ieee80211_iface_combination combination1 = { + * .limits = limits1, + * .n_limits = ARRAY_SIZE(limits1), + * .max_interfaces = 2, + * .beacon_int_infra_match = true, + * }; + * + * + * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total: + * + * struct ieee80211_iface_limit limits2[] = { + * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | + * BIT(NL80211_IFTYPE_P2P_GO), }, + * }; + * struct ieee80211_iface_combination combination2 = { + * .limits = limits2, + * .n_limits = ARRAY_SIZE(limits2), + * .max_interfaces = 8, + * .num_different_channels = 1, + * }; + * + * + * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total. + * + * This allows for an infrastructure connection and three P2P connections. + * + * struct ieee80211_iface_limit limits3[] = { + * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, + * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | + * BIT(NL80211_IFTYPE_P2P_CLIENT), }, + * }; + * struct ieee80211_iface_combination combination3 = { + * .limits = limits3, + * .n_limits = ARRAY_SIZE(limits3), + * .max_interfaces = 4, + * .num_different_channels = 2, + * }; + */ +struct ieee80211_iface_combination { + const struct ieee80211_iface_limit *limits; + u32 num_different_channels; + u16 max_interfaces; + u8 n_limits; + bool beacon_int_infra_match; + u8 radar_detect_widths; + u8 radar_detect_regions; +}; + +struct ieee80211_txrx_stypes { + u16 tx, rx; }; +/** + * enum wiphy_wowlan_support_flags - WoWLAN support flags + * @WIPHY_WOWLAN_ANY: supports wakeup for the special "any" + * trigger that keeps the device operating as-is and + * wakes up the host on any activity, for example a + * received packet that passed filtering; note that the + * packet should be preserved in that case + * @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet + * (see nl80211.h) + * @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect + * @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep + * @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure + * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request + * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure + * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release + */ +enum wiphy_wowlan_support_flags { + WIPHY_WOWLAN_ANY = BIT(0), + WIPHY_WOWLAN_MAGIC_PKT = BIT(1), + WIPHY_WOWLAN_DISCONNECT = BIT(2), + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3), + WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4), + WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), + WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), + WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), +}; + +struct wiphy_wowlan_tcp_support { + const struct nl80211_wowlan_tcp_data_token_feature *tok; + u32 data_payload_max; + u32 data_interval_max; + u32 wake_payload_max; + bool seq; +}; + +/** + * struct wiphy_wowlan_support - WoWLAN support data + * @flags: see &enum wiphy_wowlan_support_flags + * @n_patterns: number of supported wakeup patterns + * (see nl80211.h for the pattern definition) + * @pattern_max_len: maximum length of each pattern + * @pattern_min_len: minimum length of each pattern + * @max_pkt_offset: maximum Rx packet offset + * @tcp: TCP wakeup support information + */ +struct wiphy_wowlan_support { + u32 flags; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int 
max_pkt_offset; + const struct wiphy_wowlan_tcp_support *tcp; +}; + +/** + * struct wiphy_coalesce_support - coalesce support data + * @n_rules: maximum number of coalesce rules + * @max_delay: maximum supported coalescing delay in msecs + * @n_patterns: number of supported patterns in a rule + * (see nl80211.h for the pattern definition) + * @pattern_max_len: maximum length of each pattern + * @pattern_min_len: minimum length of each pattern + * @max_pkt_offset: maximum Rx packet offset + */ +struct wiphy_coalesce_support { + int n_rules; + int max_delay; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; +}; + +/** + * enum wiphy_vendor_command_flags - validation flags for vendor commands + * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev + * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev + * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running + * (must be combined with %_WDEV or %_NETDEV) + */ +enum wiphy_vendor_command_flags { + WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0), + WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1), + WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2), +}; + +/** + * struct wiphy_vendor_command - vendor command definition + * @info: vendor command identifying information, as used in nl80211 + * @flags: flags, see &enum wiphy_vendor_command_flags + * @doit: callback for the operation, note that wdev is %NULL if the + * flags didn't ask for a wdev and non-%NULL otherwise; the data + * pointer may be %NULL if userspace provided no data at all + */ +struct wiphy_vendor_command { + struct nl80211_vendor_cmd_info info; + u32 flags; + int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +}; + +/** + * struct wiphy - wireless hardware description + * @reg_notifier: the driver's regulatory notification callback, + * note that if your driver uses wiphy_apply_custom_regulatory() + * the reg_notifier's request can be passed as NULL + * @regd: the driver's regulatory domain, if one was requested via + * the regulatory_hint() API. This can be used by the driver + * on the reg_notifier() if it chooses to ignore future + * regulatory domain changes caused by other drivers. + * @signal_type: signal type reported in &struct cfg80211_bss. + * @cipher_suites: supported cipher suites + * @n_cipher_suites: number of supported cipher suites + * @retry_short: Retry limit for short frames (dot11ShortRetryLimit) + * @retry_long: Retry limit for long frames (dot11LongRetryLimit) + * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); + * -1 = fragmentation disabled, only odd values >= 256 used + * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled + * @_net: the network namespace this wiphy currently lives in + * @perm_addr: permanent MAC address of this device + * @addr_mask: If the device supports multiple MAC addresses by masking, + * set this to a mask with variable bits set to 1, e.g. if the last + * four bits are variable then set it to 00-00-00-00-00-0f. The actual + * variable bits shall be determined by the interfaces added, with + * interfaces not matching the mask being rejected to be brought up. + * @n_addresses: number of addresses in @addresses. + * @addresses: If the device has more than one address, set this pointer + * to a list of addresses (6 bytes each). The first one will be used + * by default for perm_addr. In this case, the mask should be set to + * all-zeroes. 
In this case it is assumed that the device can handle + * the same number of arbitrary MAC addresses. + * @registered: protects ->resume and ->suspend sysfs callbacks against + * unregister hardware + * @debugfsdir: debugfs directory used for this wiphy, will be renamed + * automatically on wiphy renames + * @dev: (virtual) struct device for this wiphy + * @registered: helps synchronize suspend/resume with wiphy unregister + * @wext: wireless extension handlers + * @priv: driver private data (sized according to wiphy_new() parameter) + * @interface_modes: bitmask of interfaces types valid for this wiphy, + * must be set by driver + * @iface_combinations: Valid interface combinations array, should not + * list single interface types. + * @n_iface_combinations: number of entries in @iface_combinations array. + * @software_iftypes: bitmask of software interface types, these are not + * subject to any restrictions since they are purely managed in SW. + * @flags: wiphy flags, see &enum wiphy_flags + * @regulatory_flags: wiphy regulatory flags, see + * &enum ieee80211_regulatory_flags + * @features: features advertised to nl80211, see &enum nl80211_feature_flags. + * @bss_priv_size: each BSS struct has private data allocated with it, + * this variable determines its size + * @max_scan_ssids: maximum number of SSIDs the device can scan for in + * any given scan + * @max_sched_scan_ssids: maximum number of SSIDs the device can scan + * for in any given scheduled scan + * @max_match_sets: maximum number of match sets the device can handle + * when performing a scheduled scan, 0 if filtering is not + * supported. + * @max_scan_ie_len: maximum length of user-controlled IEs device can + * add to probe request frames transmitted during a scan, must not + * include fixed IEs like supported rates + * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled + * scans + * @coverage_class: current coverage class + * @fw_version: firmware version for ethtool reporting + * @hw_version: hardware version for ethtool reporting + * @max_num_pmkids: maximum number of PMKIDs supported by device + * @privid: a pointer that drivers can use to identify if an arbitrary + * wiphy is theirs, e.g. in global notifiers + * @bands: information about bands/channels supported by this device + * + * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or + * transmitted through nl80211, points to an array indexed by interface + * type + * + * @available_antennas_tx: bitmap of antennas which are available to be + * configured as TX antennas. Antenna configuration commands will be + * rejected unless this or @available_antennas_rx is set. + * + * @available_antennas_rx: bitmap of antennas which are available to be + * configured as RX antennas. Antenna configuration commands will be + * rejected unless this or @available_antennas_tx is set. + * + * @probe_resp_offload: + * Bitmap of supported protocols for probe response offloading. + * See &enum nl80211_probe_resp_offload_support_attr. Only valid + * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. + * + * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation + * may request, if implemented. + * + * @wowlan: WoWLAN support information + * @wowlan_config: current WoWLAN configuration; this should usually not be + * used since access to it is necessarily racy, use the parameter passed + * to the suspend() operation instead. + * + * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. 
+ * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. + * If null, then none can be over-ridden. + * @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden. + * If null, then none can be over-ridden. + * + * @max_acl_mac_addrs: Maximum number of MAC addresses that the device + * supports for ACL. + * + * @extended_capabilities: extended capabilities supported by the driver, + * additional capabilities might be supported by userspace; these are + * the 802.11 extended capabilities ("Extended Capabilities element") + * and are in the same format as in the information element. See + * 802.11-2012 8.4.2.29 for the defined fields. + * @extended_capabilities_mask: mask of the valid values + * @extended_capabilities_len: length of the extended capabilities + * @coalesce: packet coalescing support information + * + * @vendor_commands: array of vendor commands supported by the hardware + * @n_vendor_commands: number of vendor commands + * @vendor_events: array of vendor events supported by the hardware + * @n_vendor_events: number of vendor events + * + * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode + * (including P2P GO) or 0 to indicate no such limit is advertised. The + * driver is allowed to advertise a theoretical limit that it can reach in + * some cases, but may not always reach. + * + * @max_num_csa_counters: Number of supported csa_counters in beacons + * and probe responses. This value should be set if the driver + * wishes to limit the number of csa counters. Default (0) means + * infinite. + * @max_adj_channel_rssi_comp: max offset of between the channel on which the + * frame was sent and the channel on which the frame was heard for which + * the reported rssi is still valid. If a driver is able to compensate the + * low rssi when a frame is heard on different channel, then it should set + * this variable to the maximal offset for which it can compensate. + * This value should be set in MHz. + */ +struct wiphy { + /* assign these fields before you register the wiphy */ + + /* permanent MAC address(es) */ + u8 perm_addr[ETH_ALEN]; + u8 addr_mask[ETH_ALEN]; + + struct mac_address *addresses; + + const struct ieee80211_txrx_stypes *mgmt_stypes; + + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; + u16 software_iftypes; + + u16 n_addresses; + + /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ + u16 interface_modes; + + u16 max_acl_mac_addrs; + + u32 flags, regulatory_flags, features; + + u32 ap_sme_capa; + + enum cfg80211_signal_type signal_type; + + int bss_priv_size; + u8 max_scan_ssids; + u8 max_sched_scan_ssids; + u8 max_match_sets; + u16 max_scan_ie_len; + u16 max_sched_scan_ie_len; + + int n_cipher_suites; + const u32 *cipher_suites; + + u8 retry_short; + u8 retry_long; + u32 frag_threshold; + u32 rts_threshold; + u8 coverage_class; + + char fw_version[ETHTOOL_FWVERS_LEN]; + u32 hw_version; + +#ifdef CONFIG_PM + const struct wiphy_wowlan_support *wowlan; + struct cfg80211_wowlan *wowlan_config; +#endif + + u16 max_remain_on_channel_duration; + + u8 max_num_pmkids; + + u32 available_antennas_tx; + u32 available_antennas_rx; + + /* + * Bitmap of supported protocols for probe response offloading + * see &enum nl80211_probe_resp_offload_support_attr. Only valid + * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. 
+ */ + u32 probe_resp_offload; + + const u8 *extended_capabilities, *extended_capabilities_mask; + u8 extended_capabilities_len; + + /* If multiple wiphys are registered and you're handed e.g. + * a regular netdev with assigned ieee80211_ptr, you won't + * know whether it points to a wiphy your driver has registered + * or not. Assign this to something global to your driver to + * help determine whether you own this wiphy or not. */ + const void *privid; + + struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; + + /* Lets us get back the wiphy on the callback */ + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request); + + /* fields below are read-only, assigned by cfg80211 */ + + const struct ieee80211_regdomain __rcu *regd; + + /* the item in /sys/class/ieee80211/ points to this, + * you need use set_wiphy_dev() (see below) */ + struct device dev; + + /* protects ->resume, ->suspend sysfs callbacks against unregister hw */ + bool registered; + + /* dir in debugfs: ieee80211/<wiphyname> */ + struct dentry *debugfsdir; + + const struct ieee80211_ht_cap *ht_capa_mod_mask; + const struct ieee80211_vht_cap *vht_capa_mod_mask; + +#ifdef CONFIG_NET_NS + /* the network namespace this phy lives in currently */ + struct net *_net; +#endif + +#ifdef CONFIG_CFG80211_WEXT + const struct iw_handler_def *wext; +#endif + + const struct wiphy_coalesce_support *coalesce; + + const struct wiphy_vendor_command *vendor_commands; + const struct nl80211_vendor_cmd_info *vendor_events; + int n_vendor_commands, n_vendor_events; + + u16 max_ap_assoc_sta; + + u8 max_num_csa_counters; + u8 max_adj_channel_rssi_comp; + + char priv[0] __aligned(NETDEV_ALIGN); +}; + +static inline struct net *wiphy_net(struct wiphy *wiphy) +{ + return read_pnet(&wiphy->_net); +} + +static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) +{ + write_pnet(&wiphy->_net, net); +} + +/** + * wiphy_priv - return priv from wiphy + * + * @wiphy: the wiphy whose priv pointer to return + * Return: The priv of @wiphy. + */ +static inline void *wiphy_priv(struct wiphy *wiphy) +{ + BUG_ON(!wiphy); + return &wiphy->priv; +} + +/** + * priv_to_wiphy - return the wiphy containing the priv + * + * @priv: a pointer previously returned by wiphy_priv + * Return: The wiphy of @priv. + */ +static inline struct wiphy *priv_to_wiphy(void *priv) +{ + BUG_ON(!priv); + return container_of(priv, struct wiphy, priv); +} + +/** + * set_wiphy_dev - set device pointer for wiphy + * + * @wiphy: The wiphy whose device to bind + * @dev: The device to parent it to + */ +static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev) +{ + wiphy->dev.parent = dev; +} + +/** + * wiphy_dev - get wiphy dev pointer + * + * @wiphy: The wiphy whose device struct to look up + * Return: The dev of @wiphy. + */ +static inline struct device *wiphy_dev(struct wiphy *wiphy) +{ + return wiphy->dev.parent; +} + +/** + * wiphy_name - get wiphy name + * + * @wiphy: The wiphy whose name to return + * Return: The name of @wiphy. + */ +static inline const char *wiphy_name(const struct wiphy *wiphy) +{ + return dev_name(&wiphy->dev); +} + +/** + * wiphy_new - create a new wiphy for use with cfg80211 + * + * @ops: The configuration operations for this device + * @sizeof_priv: The size of the private area to allocate + * + * Create a new wiphy and associate the given operations with it. + * @sizeof_priv bytes are allocated for private use. + * + * Return: A pointer to the new wiphy. 
This pointer must be + * assigned to each netdev's ieee80211_ptr for proper operation. + */ +struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv); + +/** + * wiphy_register - register a wiphy with cfg80211 + * + * @wiphy: The wiphy to register. + * + * Return: A non-negative wiphy index or a negative error code. + */ +int wiphy_register(struct wiphy *wiphy); + +/** + * wiphy_unregister - deregister a wiphy from cfg80211 + * + * @wiphy: The wiphy to unregister. + * + * After this call, no more requests can be made with this priv + * pointer, but the call may sleep to wait for an outstanding + * request that is being handled. + */ +void wiphy_unregister(struct wiphy *wiphy); + +/** + * wiphy_free - free wiphy + * + * @wiphy: The wiphy to free + */ +void wiphy_free(struct wiphy *wiphy); + +/* internal structs */ +struct cfg80211_conn; +struct cfg80211_internal_bss; +struct cfg80211_cached_keys; + +/** + * struct wireless_dev - wireless device state + * + * For netdevs, this structure must be allocated by the driver + * that uses the ieee80211_ptr field in struct net_device (this + * is intentional so it can be allocated along with the netdev.) + * It need not be registered then as netdev registration will + * be intercepted by cfg80211 to see the new wireless device. + * + * For non-netdev uses, it must also be allocated by the driver + * in response to the cfg80211 callbacks that require it, as + * there's no netdev registration in that case it may not be + * allocated outside of callback operations that return it. + * + * @wiphy: pointer to hardware description + * @iftype: interface type + * @list: (private) Used to collect the interfaces + * @netdev: (private) Used to reference back to the netdev, may be %NULL + * @identifier: (private) Identifier used in nl80211 to identify this + * wireless device if it has no netdev + * @current_bss: (private) Used by the internal configuration code + * @chandef: (private) Used by the internal configuration code to track + * the user-set channel definition. + * @preset_chandef: (private) Used by the internal configuration code to + * track the channel to be used for AP later + * @bssid: (private) Used by the internal configuration code + * @ssid: (private) Used by the internal configuration code + * @ssid_len: (private) Used by the internal configuration code + * @mesh_id_len: (private) Used by the internal configuration code + * @mesh_id_up_len: (private) Used by the internal configuration code + * @wext: (private) Used by the internal wireless extensions compat code + * @use_4addr: indicates 4addr mode is used on this interface, must be + * set by driver (if supported) on add_interface BEFORE registering the + * netdev and may otherwise be used by driver read-only, will be update + * by cfg80211 on change_interface + * @mgmt_registrations: list of registrations for management frames + * @mgmt_registrations_lock: lock for the list + * @mtx: mutex used to lock data in this struct, may be used by drivers + * and some API functions require it held + * @beacon_interval: beacon interval used on this device for transmitting + * beacons, 0 when not valid + * @address: The address for this device, valid only if @netdev is %NULL + * @p2p_started: true if this is a P2P Device that has been started + * @cac_started: true if DFS channel availability check has been started + * @cac_start_time: timestamp (jiffies) when the dfs state was entered. 
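/*
 * Illustrative sketch (not part of this patch): the usual allocate /
 * describe / register / unregister life cycle built from wiphy_new(),
 * wiphy_priv(), set_wiphy_dev(), wiphy_register(), wiphy_unregister()
 * and wiphy_free().  The private struct, the cfg80211_ops instance, the
 * band pointer and the capability values are hypothetical.
 */
struct my_drv_priv {
	struct device *dev;
	/* ... driver state ... */
};

static struct wiphy *my_drv_create_wiphy(struct device *dev,
					 const struct cfg80211_ops *ops,
					 struct ieee80211_supported_band *band_2ghz)
{
	struct wiphy *wiphy;
	struct my_drv_priv *priv;
	int err;

	wiphy = wiphy_new(ops, sizeof(*priv));
	if (!wiphy)
		return NULL;

	priv = wiphy_priv(wiphy);	/* lives inside the wiphy allocation */
	priv->dev = dev;

	set_wiphy_dev(wiphy, dev);	/* parent in /sys/class/ieee80211/<phy> */
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
	wiphy->max_scan_ssids = 4;
	wiphy->bands[IEEE80211_BAND_2GHZ] = band_2ghz;

	err = wiphy_register(wiphy);
	if (err) {
		wiphy_free(wiphy);
		return NULL;
	}
	return wiphy;
}

static void my_drv_destroy_wiphy(struct wiphy *wiphy)
{
	wiphy_unregister(wiphy);
	wiphy_free(wiphy);
}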
+ * @cac_time_ms: CAC time in ms + * @ps: powersave mode is enabled + * @ps_timeout: dynamic powersave timeout + * @ap_unexpected_nlportid: (private) netlink port ID of application + * registered for unexpected class 3 frames (AP mode) + * @conn: (private) cfg80211 software SME connection state machine data + * @connect_keys: (private) keys to set after connection is established + * @ibss_fixed: (private) IBSS is using fixed BSSID + * @ibss_dfs_possible: (private) IBSS may change to a DFS channel + * @event_list: (private) list for internal event processing + * @event_lock: (private) lock for event list + * @owner_nlportid: (private) owner socket port ID + */ +struct wireless_dev { + struct wiphy *wiphy; + enum nl80211_iftype iftype; + + /* the remainder of this struct should be private to cfg80211 */ + struct list_head list; + struct net_device *netdev; + + u32 identifier; + + struct list_head mgmt_registrations; + spinlock_t mgmt_registrations_lock; + + struct mutex mtx; + + bool use_4addr, p2p_started; + + u8 address[ETH_ALEN] __aligned(sizeof(u16)); + + /* currently used for IBSS and SME - might be rearranged later */ + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len, mesh_id_len, mesh_id_up_len; + struct cfg80211_conn *conn; + struct cfg80211_cached_keys *connect_keys; + + struct list_head event_list; + spinlock_t event_lock; + + struct cfg80211_internal_bss *current_bss; /* associated / joined */ + struct cfg80211_chan_def preset_chandef; + struct cfg80211_chan_def chandef; + + bool ibss_fixed; + bool ibss_dfs_possible; + + bool ps; + int ps_timeout; + + int beacon_interval; + + u32 ap_unexpected_nlportid; + + bool cac_started; + unsigned long cac_start_time; + unsigned int cac_time_ms; + + u32 owner_nlportid; + +#ifdef CONFIG_CFG80211_WEXT + /* wext data */ + struct { + struct cfg80211_ibss_params ibss; + struct cfg80211_connect_params connect; + struct cfg80211_cached_keys *keys; + const u8 *ie; + size_t ie_len; + u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + s8 default_key, default_mgmt_key; + bool prev_bssid_valid; + } wext; +#endif +}; + +static inline u8 *wdev_address(struct wireless_dev *wdev) +{ + if (wdev->netdev) + return wdev->netdev->dev_addr; + return wdev->address; +} + +/** + * wdev_priv - return wiphy priv from wireless_dev + * + * @wdev: The wireless device whose wiphy's priv pointer to return + * Return: The wiphy priv of @wdev. + */ +static inline void *wdev_priv(struct wireless_dev *wdev) +{ + BUG_ON(!wdev); + return wiphy_priv(wdev->wiphy); +} + +/** + * DOC: Utility functions + * + * cfg80211 offers a number of utility functions that can be useful. + */ + +/** + * ieee80211_channel_to_frequency - convert channel number to frequency + * @chan: channel number + * @band: band, necessary due to channel number overlap + * Return: The corresponding frequency (in MHz), or 0 if the conversion failed. + */ +int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band); + +/** + * ieee80211_frequency_to_channel - convert frequency to channel number + * @freq: center frequency + * Return: The corresponding channel, or 0 if the conversion failed. + */ +int ieee80211_frequency_to_channel(int freq); + +/* + * Name indirection necessary because the ieee80211 code also has + * a function named "ieee80211_get_channel", so if you include + * cfg80211's header file you get cfg80211's version, if you try + * to include both header files you'll (rightfully!) get a symbol + * clash. 
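/*
 * Illustrative sketch (not part of this patch): converting between channel
 * numbers and center frequencies with the two helpers above.  Channel 6 on
 * the 2.4 GHz band is just an example value.
 */
static int my_drv_example_chan_conversion(void)
{
	int freq = ieee80211_channel_to_frequency(6, IEEE80211_BAND_2GHZ);

	if (!freq)
		return -EINVAL;		/* no such channel on that band */

	/* round-trips back to channel 6 */
	return ieee80211_frequency_to_channel(freq);
}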
+ */ +struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, + int freq); +/** + * ieee80211_get_channel - get channel struct from wiphy for specified frequency + * @wiphy: the struct wiphy to get the channel for + * @freq: the center frequency of the channel + * Return: The channel struct from @wiphy at @freq. + */ +static inline struct ieee80211_channel * +ieee80211_get_channel(struct wiphy *wiphy, int freq) +{ + return __ieee80211_get_channel(wiphy, freq); +} + +/** + * ieee80211_get_response_rate - get basic rate for a given rate + * + * @sband: the band to look for rates in + * @basic_rates: bitmap of basic rates + * @bitrate: the bitrate for which to find the basic rate + * + * Return: The basic rate corresponding to a given bitrate, that + * is the next lower bitrate contained in the basic rate map, + * which is, for this function, given as a bitmap of indices of + * rates in the band's bitrate table. + */ +struct ieee80211_rate * +ieee80211_get_response_rate(struct ieee80211_supported_band *sband, + u32 basic_rates, int bitrate); + +/** + * ieee80211_mandatory_rates - get mandatory rates for a given band + * @sband: the band to look for rates in + * @scan_width: width of the control channel + * + * This function returns a bitmap of the mandatory rates for the given + * band, bits are set according to the rate position in the bitrates array. + */ +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, + enum nl80211_bss_scan_width scan_width); + +/* + * Radiotap parsing functions -- for controlled injection support + * + * Implemented in net/wireless/radiotap.c + * Documentation in Documentation/networking/radiotap-headers.txt + */ + +struct radiotap_align_size { + uint8_t align:4, size:4; +}; + +struct ieee80211_radiotap_namespace { + const struct radiotap_align_size *align_size; + int n_bits; + uint32_t oui; + uint8_t subns; +}; + +struct ieee80211_radiotap_vendor_namespaces { + const struct ieee80211_radiotap_namespace *ns; + int n_ns; +}; + +/** + * struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args + * @this_arg_index: index of current arg, valid after each successful call + * to ieee80211_radiotap_iterator_next() + * @this_arg: pointer to current radiotap arg; it is valid after each + * call to ieee80211_radiotap_iterator_next() but also after + * ieee80211_radiotap_iterator_init() where it will point to + * the beginning of the actual data portion + * @this_arg_size: length of the current arg, for convenience + * @current_namespace: pointer to the current namespace definition + * (or internally %NULL if the current namespace is unknown) + * @is_radiotap_ns: indicates whether the current namespace is the default + * radiotap namespace or not + * + * @_rtheader: pointer to the radiotap header we are walking through + * @_max_length: length of radiotap header in cpu byte ordering + * @_arg_index: next argument index + * @_arg: next argument pointer + * @_next_bitmap: internal pointer to next present u32 + * @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present + * @_vns: vendor namespace definitions + * @_next_ns_data: beginning of the next namespace's data + * @_reset_on_ext: internal; reset the arg index to 0 when going to the + * next bitmap word + * + * Describes the radiotap parser state. Fields prefixed with an underscore + * must not be used by users of the parser, only by the parser internally. 
+ */ + +struct ieee80211_radiotap_iterator { + struct ieee80211_radiotap_header *_rtheader; + const struct ieee80211_radiotap_vendor_namespaces *_vns; + const struct ieee80211_radiotap_namespace *current_namespace; + + unsigned char *_arg, *_next_ns_data; + __le32 *_next_bitmap; + + unsigned char *this_arg; + int this_arg_index; + int this_arg_size; + + int is_radiotap_ns; + + int _max_length; + int _arg_index; + uint32_t _bitmap_shifter; + int _reset_on_ext; +}; + +int +ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator, + struct ieee80211_radiotap_header *radiotap_header, + int max_length, + const struct ieee80211_radiotap_vendor_namespaces *vns); + +int +ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator); + + +extern const unsigned char rfc1042_header[6]; +extern const unsigned char bridge_tunnel_header[6]; + +/** + * ieee80211_get_hdrlen_from_skb - get header length from data + * + * @skb: the frame + * + * Given an skb with a raw 802.11 header at the data pointer this function + * returns the 802.11 header length. + * + * Return: The 802.11 header length in bytes (not including encryption + * headers). Or 0 if the data in the sk_buff is too short to contain a valid + * 802.11 header. + */ +unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); + +/** + * ieee80211_hdrlen - get header length in bytes from frame control + * @fc: frame control field in little-endian format + * Return: The header length in bytes. + */ +unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc); + +/** + * ieee80211_get_mesh_hdrlen - get mesh extension header length + * @meshhdr: the mesh extension header, only the flags field + * (first byte) will be accessed + * Return: The length of the extension header, which is always at + * least 6 bytes and at most 18 if address 5 and 6 are present. + */ +unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); + +/** + * DOC: Data path helpers + * + * In addition to generic utilities, cfg80211 also offers + * functions that help implement the data path for devices + * that do not do the 802.11/802.3 conversion on the device. + */ + +/** + * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 + * @skb: the 802.11 data frame + * @addr: the device MAC address + * @iftype: the virtual interface type + * Return: 0 on success. Non-zero on error. + */ +int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype); + +/** + * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 + * @skb: the 802.3 frame + * @addr: the device MAC address + * @iftype: the virtual interface type + * @bssid: the network bssid (used only for iftype STATION and ADHOC) + * @qos: build 802.11 QoS data frame + * Return: 0 on success, or a negative error code. + */ +int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype, const u8 *bssid, + bool qos); + +/** + * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame + * + * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of + * 802.3 frames. The @list will be empty if the decode fails. The + * @skb is consumed after the function returns. + * + * @skb: The input IEEE 802.11n A-MSDU frame. + * @list: The output list of 802.3 frames. It must be allocated and + * initialized by by the caller. + * @addr: The device MAC address. + * @iftype: The device interface type. + * @extra_headroom: The hardware extra headroom for SKBs in the @list. 
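/*
 * Illustrative sketch (not part of this patch): the canonical parse loop for
 * the radiotap iterator declared above, here pulling out the legacy rate
 * field if present (see Documentation/networking/radiotap-headers.txt).
 * The -ENODATA return is just this sketch's convention.
 */
static int my_drv_get_radiotap_rate(struct ieee80211_radiotap_header *rthdr,
				    int len, u8 *rate)
{
	struct ieee80211_radiotap_iterator it;
	int ret;

	ret = ieee80211_radiotap_iterator_init(&it, rthdr, len, NULL);
	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&it);
		if (ret)
			break;
		if (it.this_arg_index == IEEE80211_RADIOTAP_RATE) {
			*rate = *it.this_arg;	/* units of 500 kbps */
			return 0;
		}
	}
	/* -ENOENT from _next() just means the whole header was walked */
	return ret == -ENOENT ? -ENODATA : ret;
}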
+ * @has_80211_header: Set it true if SKB is with IEEE 802.11 header. + */ +void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, + const u8 *addr, enum nl80211_iftype iftype, + const unsigned int extra_headroom, + bool has_80211_header); + +/** + * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame + * @skb: the data frame + * @qos_map: Interworking QoS mapping or %NULL if not in use + * Return: The 802.1p/1d tag. + */ +unsigned int cfg80211_classify8021d(struct sk_buff *skb, + struct cfg80211_qos_map *qos_map); + +/** + * cfg80211_find_ie - find information element in data + * + * @eid: element ID + * @ies: data consisting of IEs + * @len: length of data + * + * Return: %NULL if the element ID could not be found or if + * the element is invalid (claims to be longer than the given + * data), or a pointer to the first byte of the requested + * element, that is the byte containing the element ID. + * + * Note: There are no checks on the element length other than + * having to fit into the given data. + */ +const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); + +/** + * cfg80211_find_vendor_ie - find vendor specific information element in data + * + * @oui: vendor OUI + * @oui_type: vendor-specific OUI type + * @ies: data consisting of IEs + * @len: length of data + * + * Return: %NULL if the vendor specific element ID could not be found or if the + * element is invalid (claims to be longer than the given data), or a pointer to + * the first byte of the requested element, that is the byte containing the + * element ID. + * + * Note: There are no checks on the element length other than having to fit into + * the given data. + */ +const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type, + const u8 *ies, int len); + +/** + * DOC: Regulatory enforcement infrastructure + * + * TODO + */ + +/** + * regulatory_hint - driver hint to the wireless core a regulatory domain + * @wiphy: the wireless device giving the hint (used only for reporting + * conflicts) + * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain + * should be in. If @rd is set this should be NULL. Note that if you + * set this to NULL you should still set rd->alpha2 to some accepted + * alpha2. + * + * Wireless drivers can use this function to hint to the wireless core + * what it believes should be the current regulatory domain by + * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory + * domain should be in or by providing a completely build regulatory domain. + * If the driver provides an ISO/IEC 3166 alpha2 userspace will be queried + * for a regulatory domain structure for the respective country. + * + * The wiphy must have been registered to cfg80211 prior to this call. + * For cfg80211 drivers this means you must first use wiphy_register(), + * for mac80211 drivers you must first use ieee80211_register_hw(). + * + * Drivers should check the return value, its possible you can get + * an -ENOMEM. + * + * Return: 0 on success. -ENOMEM. + */ +int regulatory_hint(struct wiphy *wiphy, const char *alpha2); + +/** + * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain + * @wiphy: the wireless device we want to process the regulatory domain on + * @regd: the custom regulatory domain to use for this wiphy + * + * Drivers can sometimes have custom regulatory domains which do not apply + * to a specific country. Drivers can use this to apply such custom regulatory + * domains. 
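/*
 * Illustrative sketch (not part of this patch): using cfg80211_find_ie() to
 * pull the SSID element out of a buffer of information elements.  The helper
 * name and error convention are made up.
 */
static int my_drv_get_ssid(const u8 *ies, int ies_len,
			   u8 *ssid, size_t *ssid_len)
{
	const u8 *ie = cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);

	if (!ie || ie[1] > IEEE80211_MAX_SSID_LEN)
		return -ENOENT;

	/* ie[0] is the element ID, ie[1] the length, the payload follows */
	*ssid_len = ie[1];
	memcpy(ssid, ie + 2, ie[1]);
	return 0;
}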
This routine must be called prior to wiphy registration. The + * custom regulatory domain will be trusted completely and as such previous + * default channel settings will be disregarded. If no rule is found for a + * channel on the regulatory domain the channel will be disabled. + * Drivers using this for a wiphy should also set the wiphy flag + * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy + * that called this helper. + */ +void wiphy_apply_custom_regulatory(struct wiphy *wiphy, + const struct ieee80211_regdomain *regd); + +/** + * freq_reg_info - get regulatory information for the given frequency + * @wiphy: the wiphy for which we want to process this rule for + * @center_freq: Frequency in KHz for which we want regulatory information for + * + * Use this function to get the regulatory rule for a specific frequency on + * a given wireless device. If the device has a specific regulatory domain + * it wants to follow we respect that unless a country IE has been received + * and processed already. + * + * Return: A valid pointer, or, when an error occurs, for example if no rule + * can be found, the return value is encoded using ERR_PTR(). Use IS_ERR() to + * check and PTR_ERR() to obtain the numeric return value. The numeric return + * value will be -ERANGE if we determine the given center_freq does not even + * have a regulatory rule for a frequency range in the center_freq's band. + * See freq_in_rule_band() for our current definition of a band -- this is + * purely subjective and right now it's 802.11 specific. + */ +const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, + u32 center_freq); + +/** + * reg_initiator_name - map regulatory request initiator enum to name + * @initiator: the regulatory request initiator + * + * You can use this to map the regulatory request initiator enum to a + * proper string representation. + */ +const char *reg_initiator_name(enum nl80211_reg_initiator initiator); + +/* + * callbacks for asynchronous cfg80211 methods, notification + * functions and BSS handling helpers + */ + +/** + * cfg80211_scan_done - notify that scan finished + * + * @request: the corresponding scan request + * @aborted: set to true if the scan was aborted for any reason, + * userspace will be notified of that + */ +void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted); + +/** + * cfg80211_sched_scan_results - notify that new scan results are available + * + * @wiphy: the wiphy which got scheduled scan results + */ +void cfg80211_sched_scan_results(struct wiphy *wiphy); + +/** + * cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped + * + * @wiphy: the wiphy on which the scheduled scan stopped + * + * The driver can call this function to inform cfg80211 that the + * scheduled scan had to be stopped, for whatever reason. The driver + * is then called back via the sched_scan_stop operation when done. + */ +void cfg80211_sched_scan_stopped(struct wiphy *wiphy); + +/** + * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped + * + * @wiphy: the wiphy on which the scheduled scan stopped + * + * The driver can call this function to inform cfg80211 that the + * scheduled scan had to be stopped, for whatever reason. The driver + * is then called back via the sched_scan_stop operation when done. + * This function should be called with rtnl locked. 
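/*
 * Illustrative sketch (not part of this patch): querying the regulatory rule
 * for a channel and handling the ERR_PTR() convention described above.  The
 * KHz conversion follows the @center_freq documentation; max_eirp being in
 * mBm is an assumption of this sketch.
 */
static int my_drv_max_eirp_mbm(struct wiphy *wiphy, u32 center_freq_mhz)
{
	const struct ieee80211_reg_rule *rule;

	rule = freq_reg_info(wiphy, center_freq_mhz * 1000);	/* KHz */
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	return rule->power_rule.max_eirp;	/* assumed mBm (100 * dBm) */
}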
+ */ +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); + +/** + * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame + * + * @wiphy: the wiphy reporting the BSS + * @rx_channel: The channel the frame was received on + * @scan_width: width of the control channel + * @mgmt: the management frame (probe response or beacon) + * @len: length of the management frame + * @signal: the signal strength, type depends on the wiphy's signal_type + * @gfp: context flags + * + * This informs cfg80211 that BSS information was found and + * the BSS should be updated/added. + * + * Return: A referenced struct, must be released with cfg80211_put_bss()! + * Or %NULL on error. + */ +struct cfg80211_bss * __must_check +cfg80211_inform_bss_width_frame(struct wiphy *wiphy, + struct ieee80211_channel *rx_channel, + enum nl80211_bss_scan_width scan_width, + struct ieee80211_mgmt *mgmt, size_t len, + s32 signal, gfp_t gfp); + +static inline struct cfg80211_bss * __must_check +cfg80211_inform_bss_frame(struct wiphy *wiphy, + struct ieee80211_channel *rx_channel, + struct ieee80211_mgmt *mgmt, size_t len, + s32 signal, gfp_t gfp) +{ + return cfg80211_inform_bss_width_frame(wiphy, rx_channel, + NL80211_BSS_CHAN_WIDTH_20, + mgmt, len, signal, gfp); +} + +/** + * cfg80211_inform_bss - inform cfg80211 of a new BSS + * + * @wiphy: the wiphy reporting the BSS + * @rx_channel: The channel the frame was received on + * @scan_width: width of the control channel + * @bssid: the BSSID of the BSS + * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) + * @capability: the capability field sent by the peer + * @beacon_interval: the beacon interval announced by the peer + * @ie: additional IEs sent by the peer + * @ielen: length of the additional IEs + * @signal: the signal strength, type depends on the wiphy's signal_type + * @gfp: context flags + * + * This informs cfg80211 that BSS information was found and + * the BSS should be updated/added. + * + * Return: A referenced struct, must be released with cfg80211_put_bss()! + * Or %NULL on error. + */ +struct cfg80211_bss * __must_check +cfg80211_inform_bss_width(struct wiphy *wiphy, + struct ieee80211_channel *rx_channel, + enum nl80211_bss_scan_width scan_width, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + s32 signal, gfp_t gfp); + +static inline struct cfg80211_bss * __must_check +cfg80211_inform_bss(struct wiphy *wiphy, + struct ieee80211_channel *rx_channel, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + s32 signal, gfp_t gfp) +{ + return cfg80211_inform_bss_width(wiphy, rx_channel, + NL80211_BSS_CHAN_WIDTH_20, + bssid, tsf, capability, + beacon_interval, ie, ielen, signal, + gfp); +} + +struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + const u8 *ssid, size_t ssid_len, + u16 capa_mask, u16 capa_val); +static inline struct cfg80211_bss * +cfg80211_get_ibss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *ssid, size_t ssid_len) +{ + return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len, + WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); +} + +/** + * cfg80211_ref_bss - reference BSS struct + * @wiphy: the wiphy this BSS struct belongs to + * @bss: the BSS struct to reference + * + * Increments the refcount of the given BSS struct. 
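/*
 * Illustrative sketch (not part of this patch): a fullmac driver turning one
 * firmware scan entry into a cfg80211 BSS entry and dropping the reference
 * it gets back (cfg80211_put_bss() is declared just below).  The firmware
 * entry layout is hypothetical, and the signal is assumed to be reported in
 * mBm (CFG80211_SIGNAL_TYPE_MBM).
 */
struct my_fw_scan_entry {
	u8 bssid[ETH_ALEN];
	u64 tsf;
	u16 capability;
	u16 beacon_interval;
	s8 rssi_dbm;
	const u8 *ies;
	size_t ies_len;
};

static void my_drv_report_scan_entry(struct wiphy *wiphy,
				     struct ieee80211_channel *chan,
				     const struct my_fw_scan_entry *e)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss(wiphy, chan, e->bssid, e->tsf,
				  e->capability, e->beacon_interval,
				  e->ies, e->ies_len,
				  DBM_TO_MBM(e->rssi_dbm), GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* no private reference kept */
}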
+ */ +void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); + +/** + * cfg80211_put_bss - unref BSS struct + * @wiphy: the wiphy this BSS struct belongs to + * @bss: the BSS struct + * + * Decrements the refcount of the given BSS struct. + */ +void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); + +/** + * cfg80211_unlink_bss - unlink BSS from internal data structures + * @wiphy: the wiphy + * @bss: the bss to remove + * + * This function removes the given BSS from the internal data structures + * thereby making it no longer show up in scan results etc. Use this + * function when you detect a BSS is gone. Normally BSSes will also time + * out, so it is not necessary to use this function at all. + */ +void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); + +static inline enum nl80211_bss_scan_width +cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return NL80211_BSS_CHAN_WIDTH_5; + case NL80211_CHAN_WIDTH_10: + return NL80211_BSS_CHAN_WIDTH_10; + default: + return NL80211_BSS_CHAN_WIDTH_20; + } +} + +/** + * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame + * @dev: network device + * @buf: authentication frame (header + body) + * @len: length of the frame data + * + * This function is called whenever an authentication, disassociation or + * deauthentication frame has been received and processed in station mode. + * After being asked to authenticate via cfg80211_ops::auth() the driver must + * call either this function or cfg80211_auth_timeout(). + * After being asked to associate via cfg80211_ops::assoc() the driver must + * call either this function or cfg80211_auth_timeout(). + * While connected, the driver must calls this for received and processed + * disassociation and deauthentication frames. If the frame couldn't be used + * because it was unprotected, the driver must call the function + * cfg80211_rx_unprot_mlme_mgmt() instead. + * + * This function may sleep. The caller must hold the corresponding wdev's mutex. + */ +void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); + +/** + * cfg80211_auth_timeout - notification of timed out authentication + * @dev: network device + * @addr: The MAC address of the device with which the authentication timed out + * + * This function may sleep. The caller must hold the corresponding wdev's + * mutex. + */ +void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); + +/** + * cfg80211_rx_assoc_resp - notification of processed association response + * @dev: network device + * @bss: the BSS that association was requested with, ownership of the pointer + * moves to cfg80211 in this call + * @buf: authentication frame (header + body) + * @len: length of the frame data + * + * After being asked to associate via cfg80211_ops::assoc() the driver must + * call either this function or cfg80211_auth_timeout(). + * + * This function may sleep. The caller must hold the corresponding wdev's mutex. + */ +void cfg80211_rx_assoc_resp(struct net_device *dev, + struct cfg80211_bss *bss, + const u8 *buf, size_t len); + +/** + * cfg80211_assoc_timeout - notification of timed out association + * @dev: network device + * @bss: The BSS entry with which association timed out. + * + * This function may sleep. The caller must hold the corresponding wdev's mutex. 
+ */
+void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
+
+/**
+ * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
+ * @dev: network device
+ * @buf: 802.11 frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever deauthentication has been processed in
+ * station mode. This includes both received deauthentication frames and
+ * locally generated ones. This function may sleep. The caller must hold the
+ * corresponding wdev's mutex.
+ */
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
+
+/**
+ * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
+ * @dev: network device
+ * @buf: deauthentication frame (header + body)
+ * @len: length of the frame data
+ *
+ * This function is called whenever a received deauthentication or
+ * disassociation frame has been dropped in station mode because of MFP being
+ * used but the frame was not protected. This function may sleep.
+ */
+void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev,
+				  const u8 *buf, size_t len);
+
+/**
+ * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP)
+ * @dev: network device
+ * @addr: The source MAC address of the frame
+ * @key_type: The key type that the received frame used
+ * @key_id: Key identifier (0..3). Can be -1 if missing.
+ * @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
+ * @gfp: allocation flags
+ *
+ * This function is called whenever the local MAC detects a MIC failure in a
+ * received frame. This matches with MLME-MICHAELMICFAILURE.indication()
+ * primitive.
+ */
+void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
+				  enum nl80211_key_type key_type, int key_id,
+				  const u8 *tsc, gfp_t gfp);
+
+/**
+ * cfg80211_ibss_joined - notify cfg80211 that device joined an IBSS
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the IBSS joined
+ * @channel: the channel of the IBSS joined
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the device joined an IBSS or
+ * switched to a different BSSID. Before this function can be called,
+ * either a beacon has to have been received from the IBSS, or one of
+ * the cfg80211_inform_bss{,_frame} functions must have been called
+ * with the locally generated beacon -- this guarantees that there is
+ * always a scan result for this IBSS. cfg80211 will handle the rest.
+ */
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+			  struct ieee80211_channel *channel, gfp_t gfp);
+
+/**
+ * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer candidate
+ *
+ * @dev: network device
+ * @macaddr: the MAC address of the new candidate
+ * @ie: information elements advertised by the peer candidate
+ * @ie_len: length of the information elements buffer
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the mesh peer candidate has been
+ * detected, most likely via a beacon or, less likely, via a probe response.
+ * cfg80211 then sends a notification to userspace.
+ */
+void cfg80211_notify_new_peer_candidate(struct net_device *dev,
+	const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp);
+
+/**
+ * DOC: RFkill integration
+ *
+ * RFkill integration in cfg80211 is almost invisible to drivers,
+ * as cfg80211 automatically registers an rfkill instance for each
+ * wireless device it knows about.
Soft kill is also translated + * into disconnecting and turning all interfaces off, drivers are + * expected to turn off the device when all interfaces are down. + * + * However, devices may have a hard RFkill line, in which case they + * also need to interact with the rfkill subsystem, via cfg80211. + * They can do this with a few helper functions documented here. + */ + +/** + * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state + * @wiphy: the wiphy + * @blocked: block status + */ +void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked); + +/** + * wiphy_rfkill_start_polling - start polling rfkill + * @wiphy: the wiphy + */ +void wiphy_rfkill_start_polling(struct wiphy *wiphy); + +/** + * wiphy_rfkill_stop_polling - stop polling rfkill + * @wiphy: the wiphy + */ +void wiphy_rfkill_stop_polling(struct wiphy *wiphy); + +/** + * DOC: Vendor commands + * + * Occasionally, there are special protocol or firmware features that + * can't be implemented very openly. For this and similar cases, the + * vendor command functionality allows implementing the features with + * (typically closed-source) userspace and firmware, using nl80211 as + * the configuration mechanism. + * + * A driver supporting vendor commands must register them as an array + * in struct wiphy, with handlers for each one, each command has an + * OUI and sub command ID to identify it. + * + * Note that this feature should not be (ab)used to implement protocol + * features that could openly be shared across drivers. In particular, + * it must never be required to use vendor commands to implement any + * "normal" functionality that higher-level userspace like connection + * managers etc. need. + */ + +struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy, + enum nl80211_commands cmd, + enum nl80211_attrs attr, + int approxlen); + +struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy, + enum nl80211_commands cmd, + enum nl80211_attrs attr, + int vendor_event_idx, + int approxlen, gfp_t gfp); + +void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp); + +/** + * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply + * @wiphy: the wiphy + * @approxlen: an upper bound of the length of the data that will + * be put into the skb + * + * This function allocates and pre-fills an skb for a reply to + * a vendor command. Since it is intended for a reply, calling + * it outside of a vendor command's doit() operation is invalid. + * + * The returned skb is pre-filled with some identifying data in + * a way that any data that is put into the skb (with skb_put(), + * nla_put() or similar) will end up being within the + * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done + * with the skb is adding data for the corresponding userspace tool + * which can then read that data out of the vendor data attribute. + * You must not modify the skb in any other way. + * + * When done, call cfg80211_vendor_cmd_reply() with the skb and return + * its error code as the result of the doit() operation. + * + * Return: An allocated and pre-filled skb. %NULL if any errors happen. 
+ */ +static inline struct sk_buff * +cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen) +{ + return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR, + NL80211_ATTR_VENDOR_DATA, approxlen); +} + +/** + * cfg80211_vendor_cmd_reply - send the reply skb + * @skb: The skb, must have been allocated with + * cfg80211_vendor_cmd_alloc_reply_skb() + * + * Since calling this function will usually be the last thing + * before returning from the vendor command doit() you should + * return the error code. Note that this function consumes the + * skb regardless of the return value. + * + * Return: An error code or 0 on success. + */ +int cfg80211_vendor_cmd_reply(struct sk_buff *skb); + +/** + * cfg80211_vendor_event_alloc - allocate vendor-specific event skb + * @wiphy: the wiphy + * @event_idx: index of the vendor event in the wiphy's vendor_events + * @approxlen: an upper bound of the length of the data that will + * be put into the skb + * @gfp: allocation flags + * + * This function allocates and pre-fills an skb for an event on the + * vendor-specific multicast group. + * + * When done filling the skb, call cfg80211_vendor_event() with the + * skb to send the event. + * + * Return: An allocated and pre-filled skb. %NULL if any errors happen. + */ +static inline struct sk_buff * +cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen, + int event_idx, gfp_t gfp) +{ + return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR, + NL80211_ATTR_VENDOR_DATA, + event_idx, approxlen, gfp); +} + +/** + * cfg80211_vendor_event - send the event + * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc() + * @gfp: allocation flags + * + * This function sends the given @skb, which must have been allocated + * by cfg80211_vendor_event_alloc(), as an event. It always consumes it. + */ +static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp) +{ + __cfg80211_send_event_skb(skb, gfp); +} + +#ifdef CONFIG_NL80211_TESTMODE +/** + * DOC: Test mode + * + * Test mode is a set of utility functions to allow drivers to + * interact with driver-specific tools to aid, for instance, + * factory programming. + * + * This chapter describes how drivers interact with it, for more + * information see the nl80211 book's chapter on it. + */ + +/** + * cfg80211_testmode_alloc_reply_skb - allocate testmode reply + * @wiphy: the wiphy + * @approxlen: an upper bound of the length of the data that will + * be put into the skb + * + * This function allocates and pre-fills an skb for a reply to + * the testmode command. Since it is intended for a reply, calling + * it outside of the @testmode_cmd operation is invalid. + * + * The returned skb is pre-filled with the wiphy index and set up in + * a way that any data that is put into the skb (with skb_put(), + * nla_put() or similar) will end up being within the + * %NL80211_ATTR_TESTDATA attribute, so all that needs to be done + * with the skb is adding data for the corresponding userspace tool + * which can then read that data out of the testdata attribute. You + * must not modify the skb in any other way. + * + * When done, call cfg80211_testmode_reply() with the skb and return + * its error code as the result of the @testmode_cmd operation. + * + * Return: An allocated and pre-filled skb. %NULL if any errors happen. 
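/*
 * Illustrative sketch (not part of this patch): a vendor command doit()
 * handler that replies to userspace with cfg80211_vendor_cmd_alloc_reply_skb()
 * and cfg80211_vendor_cmd_reply().  The OUI, subcommand number and attribute
 * id are made up.
 */
static int my_drv_vendor_get_version(struct wiphy *wiphy,
				     struct wireless_dev *wdev,
				     const void *data, int data_len)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u32));
	if (!skb)
		return -ENOMEM;

	/* attribute number 1 is a hypothetical vendor-defined attribute */
	if (nla_put_u32(skb, 1, 0x010203)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}
	return cfg80211_vendor_cmd_reply(skb);	/* consumes skb */
}

static const struct wiphy_vendor_command my_drv_vendor_cmds[] = {
	{
		.info = { .vendor_id = 0x001122, .subcmd = 1 },
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
		.doit = my_drv_vendor_get_version,
	},
};

/* Before wiphy_register():
 *	wiphy->vendor_commands = my_drv_vendor_cmds;
 *	wiphy->n_vendor_commands = ARRAY_SIZE(my_drv_vendor_cmds);
 */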
+ */ +static inline struct sk_buff * +cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen) +{ + return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE, + NL80211_ATTR_TESTDATA, approxlen); +} + +/** + * cfg80211_testmode_reply - send the reply skb + * @skb: The skb, must have been allocated with + * cfg80211_testmode_alloc_reply_skb() + * + * Since calling this function will usually be the last thing + * before returning from the @testmode_cmd you should return + * the error code. Note that this function consumes the skb + * regardless of the return value. + * + * Return: An error code or 0 on success. + */ +static inline int cfg80211_testmode_reply(struct sk_buff *skb) +{ + return cfg80211_vendor_cmd_reply(skb); +} + +/** + * cfg80211_testmode_alloc_event_skb - allocate testmode event + * @wiphy: the wiphy + * @approxlen: an upper bound of the length of the data that will + * be put into the skb + * @gfp: allocation flags + * + * This function allocates and pre-fills an skb for an event on the + * testmode multicast group. + * + * The returned skb is set up in the same way as with + * cfg80211_testmode_alloc_reply_skb() but prepared for an event. As + * there, you should simply add data to it that will then end up in the + * %NL80211_ATTR_TESTDATA attribute. Again, you must not modify the skb + * in any other way. + * + * When done filling the skb, call cfg80211_testmode_event() with the + * skb to send the event. + * + * Return: An allocated and pre-filled skb. %NULL if any errors happen. + */ +static inline struct sk_buff * +cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp) +{ + return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE, + NL80211_ATTR_TESTDATA, -1, + approxlen, gfp); +} + +/** + * cfg80211_testmode_event - send the event + * @skb: The skb, must have been allocated with + * cfg80211_testmode_alloc_event_skb() + * @gfp: allocation flags + * + * This function sends the given @skb, which must have been allocated + * by cfg80211_testmode_alloc_event_skb(), as an event. It always + * consumes it. + */ +static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) +{ + __cfg80211_send_event_skb(skb, gfp); +} + +#define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd), +#define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd), +#else +#define CFG80211_TESTMODE_CMD(cmd) +#define CFG80211_TESTMODE_DUMP(cmd) +#endif + +/** + * cfg80211_connect_result - notify cfg80211 of connection result + * + * @dev: network device + * @bssid: the BSSID of the AP + * @req_ie: association request IEs (maybe be %NULL) + * @req_ie_len: association request IEs length + * @resp_ie: association response IEs (may be %NULL) + * @resp_ie_len: assoc response IEs length + * @status: status code, 0 for successful connection, use + * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you + * the real status code for failures. + * @gfp: allocation flags + * + * It should be called by the underlying driver whenever connect() has + * succeeded. 
+ */
+void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+			     const u8 *req_ie, size_t req_ie_len,
+			     const u8 *resp_ie, size_t resp_ie_len,
+			     u16 status, gfp_t gfp);
+
+/**
+ * cfg80211_roamed - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @channel: the channel of the new AP
+ * @bssid: the BSSID of the new AP
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver whenever it roamed
+ * from one AP to another while connected.
+ */
+void cfg80211_roamed(struct net_device *dev,
+		     struct ieee80211_channel *channel,
+		     const u8 *bssid,
+		     const u8 *req_ie, size_t req_ie_len,
+		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+
+/**
+ * cfg80211_roamed_bss - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @bss: entry of bss to which STA got roamed
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @gfp: allocation flags
+ *
+ * This is just a wrapper to notify cfg80211 of roaming event with driver
+ * passing bss to avoid a race in timeout of the bss entry. It should be
+ * called by the underlying driver whenever it roamed from one AP to another
+ * while connected. Drivers which have roaming implemented in firmware
+ * may use this function to avoid a race in bss entry timeout where the bss
+ * entry of the new AP is seen in the driver, but gets timed out by the time
+ * it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * rdev->event_work. In case of any failures, the reference is released
+ * either in cfg80211_roamed_bss() or in __cfg80211_roamed(); otherwise,
+ * it will be released while disconnecting from the current bss.
+ */
+void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
+			 const u8 *req_ie, size_t req_ie_len,
+			 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+
+/**
+ * cfg80211_disconnected - notify cfg80211 that connection was dropped
+ *
+ * @dev: network device
+ * @ie: information elements of the deauth/disassoc frame (may be %NULL)
+ * @ie_len: length of IEs
+ * @reason: reason code for the disconnection, set it to 0 if unknown
+ * @gfp: allocation flags
+ *
+ * After it calls this function, the driver should enter an idle state
+ * and not try to connect to any AP any more.
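/*
 * Illustrative sketch (not part of this patch): a fullmac driver reporting
 * the outcome of an earlier connect() request from its firmware event
 * handler via cfg80211_connect_result().  The event structure is
 * hypothetical.
 */
struct my_fw_connect_event {
	bool success;
	u8 bssid[ETH_ALEN];
	const u8 *req_ie, *resp_ie;
	size_t req_ie_len, resp_ie_len;
};

static void my_drv_connect_done(struct net_device *dev,
				const struct my_fw_connect_event *ev)
{
	u16 status = ev->success ? WLAN_STATUS_SUCCESS :
				   WLAN_STATUS_UNSPECIFIED_FAILURE;

	cfg80211_connect_result(dev, ev->bssid,
				ev->req_ie, ev->req_ie_len,
				ev->resp_ie, ev->resp_ie_len,
				status, GFP_KERNEL);
}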
+ */ +void cfg80211_disconnected(struct net_device *dev, u16 reason, + const u8 *ie, size_t ie_len, gfp_t gfp); + +/** + * cfg80211_ready_on_channel - notification of remain_on_channel start + * @wdev: wireless device + * @cookie: the request cookie + * @chan: The current channel (from remain_on_channel request) + * @duration: Duration in milliseconds that the driver intents to remain on the + * channel + * @gfp: allocation flags + */ +void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, + struct ieee80211_channel *chan, + unsigned int duration, gfp_t gfp); + +/** + * cfg80211_remain_on_channel_expired - remain_on_channel duration expired + * @wdev: wireless device + * @cookie: the request cookie + * @chan: The current channel (from remain_on_channel request) + * @gfp: allocation flags + */ +void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, + struct ieee80211_channel *chan, + gfp_t gfp); + + +/** + * cfg80211_new_sta - notify userspace about station + * + * @dev: the netdev + * @mac_addr: the station's address + * @sinfo: the station information + * @gfp: allocation flags + */ +void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, + struct station_info *sinfo, gfp_t gfp); + +/** + * cfg80211_del_sta - notify userspace about deletion of a station + * + * @dev: the netdev + * @mac_addr: the station's address + * @gfp: allocation flags + */ +void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp); + +/** + * cfg80211_conn_failed - connection request failed notification + * + * @dev: the netdev + * @mac_addr: the station's address + * @reason: the reason for connection failure + * @gfp: allocation flags + * + * Whenever a station tries to connect to an AP and if the station + * could not connect to the AP as the AP has rejected the connection + * for some reasons, this function is called. + * + * The reason for connection failure can be any of the value from + * nl80211_connect_failed_reason enum + */ +void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, + enum nl80211_connect_failed_reason reason, + gfp_t gfp); + +/** + * cfg80211_rx_mgmt - notification of received, unprocessed management frame + * @wdev: wireless device receiving the frame + * @freq: Frequency on which the frame was received in MHz + * @sig_dbm: signal strength in mBm, or 0 if unknown + * @buf: Management frame (header + body) + * @len: length of the frame data + * @flags: flags, as defined in enum nl80211_rxmgmt_flags + * @gfp: context flags + * + * This function is called whenever an Action frame is received for a station + * mode interface, but is not processed in kernel. + * + * Return: %true if a user space application has registered for this frame. + * For action frames, that makes it responsible for rejecting unrecognized + * action frames; %false otherwise, in which case for action frames the + * driver is responsible for rejecting the frame. 
+ */ +bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, + const u8 *buf, size_t len, u32 flags, gfp_t gfp); + +/** + * cfg80211_mgmt_tx_status - notification of TX status for management frame + * @wdev: wireless device receiving the frame + * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() + * @buf: Management frame (header + body) + * @len: length of the frame data + * @ack: Whether frame was acknowledged + * @gfp: context flags + * + * This function is called whenever a management frame was requested to be + * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the + * transmission attempt. + */ +void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, + const u8 *buf, size_t len, bool ack, gfp_t gfp); + + +/** + * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event + * @dev: network device + * @rssi_event: the triggered RSSI event + * @gfp: context flags + * + * This function is called when a configured connection quality monitoring + * rssi threshold reached event occurs. + */ +void cfg80211_cqm_rssi_notify(struct net_device *dev, + enum nl80211_cqm_rssi_threshold_event rssi_event, + gfp_t gfp); + +/** + * cfg80211_radar_event - radar detection event + * @wiphy: the wiphy + * @chandef: chandef for the current channel + * @gfp: context flags + * + * This function is called when a radar is detected on the current chanenl. + */ +void cfg80211_radar_event(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, gfp_t gfp); + +/** + * cfg80211_cac_event - Channel availability check (CAC) event + * @netdev: network device + * @chandef: chandef for the current channel + * @event: type of event + * @gfp: context flags + * + * This function is called when a Channel availability check (CAC) is finished + * or aborted. This must be called to notify the completion of a CAC process, + * also by full-MAC drivers. + */ +void cfg80211_cac_event(struct net_device *netdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event, gfp_t gfp); + + +/** + * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer + * @dev: network device + * @peer: peer's MAC address + * @num_packets: how many packets were lost -- should be a fixed threshold + * but probably no less than maybe 50, or maybe a throughput dependent + * threshold (to account for temporary interference) + * @gfp: context flags + */ +void cfg80211_cqm_pktloss_notify(struct net_device *dev, + const u8 *peer, u32 num_packets, gfp_t gfp); + +/** + * cfg80211_cqm_txe_notify - TX error rate event + * @dev: network device + * @peer: peer's MAC address + * @num_packets: how many packets were lost + * @rate: % of packets which failed transmission + * @intvl: interval (in s) over which the TX failure threshold was breached. + * @gfp: context flags + * + * Notify userspace when configured % TX failures over number of packets in a + * given interval is exceeded. 
+ */ +void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, + u32 num_packets, u32 rate, u32 intvl, gfp_t gfp); + +/** + * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying + * @dev: network device + * @bssid: BSSID of AP (to avoid races) + * @replay_ctr: new replay counter + * @gfp: allocation flags + */ +void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, + const u8 *replay_ctr, gfp_t gfp); + +/** + * cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate + * @dev: network device + * @index: candidate index (the smaller the index, the higher the priority) + * @bssid: BSSID of AP + * @preauth: Whether AP advertises support for RSN pre-authentication + * @gfp: allocation flags + */ +void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, + const u8 *bssid, bool preauth, gfp_t gfp); + +/** + * cfg80211_rx_spurious_frame - inform userspace about a spurious frame + * @dev: The device the frame matched to + * @addr: the transmitter address + * @gfp: context flags + * + * This function is used in AP mode (only!) to inform userspace that + * a spurious class 3 frame was received, to be able to deauth the + * sender. + * Return: %true if the frame was passed to userspace (or this failed + * for a reason other than not having a subscription.) + */ +bool cfg80211_rx_spurious_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp); + +/** + * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame + * @dev: The device the frame matched to + * @addr: the transmitter address + * @gfp: context flags + * + * This function is used in AP mode (only!) to inform userspace that + * an associated station sent a 4addr frame but that wasn't expected. + * It is allowed and desirable to send this event only once for each + * station to avoid event flooding. + * Return: %true if the frame was passed to userspace (or this failed + * for a reason other than not having a subscription.) + */ +bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp); + +/** + * cfg80211_probe_status - notify userspace about probe status + * @dev: the device the probe was sent on + * @addr: the address of the peer + * @cookie: the cookie filled in @probe_client previously + * @acked: indicates whether probe was acked or not + * @gfp: allocation flags + */ +void cfg80211_probe_status(struct net_device *dev, const u8 *addr, + u64 cookie, bool acked, gfp_t gfp); + +/** + * cfg80211_report_obss_beacon - report beacon from other APs + * @wiphy: The wiphy that received the beacon + * @frame: the frame + * @len: length of the frame + * @freq: frequency the frame was received on + * @sig_dbm: signal strength in mBm, or 0 if unknown + * + * Use this function to report to userspace when a beacon was + * received. It is not useful to call this when there is no + * netdev that is in AP/GO mode. + */ +void cfg80211_report_obss_beacon(struct wiphy *wiphy, + const u8 *frame, size_t len, + int freq, int sig_dbm); + +/** + * cfg80211_reg_can_beacon - check if beaconing is allowed + * @wiphy: the wiphy + * @chandef: the channel definition + * @iftype: interface type + * + * Return: %true if there is no secondary channel or the secondary channel(s) + * can be used for beaconing (i.e. is not a radar channel etc.) 
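(For example, an AP-start path in a driver might guard its channel choice with: if (!cfg80211_reg_can_beacon(wiphy, &chandef, NL80211_IFTYPE_AP)) return -EINVAL; the surrounding error handling is assumed, not part of this header.)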
+ */
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+			     struct cfg80211_chan_def *chandef,
+			     enum nl80211_iftype iftype);
+
+/**
+ * cfg80211_ch_switch_notify - update wdev channel and notify userspace
+ * @dev: the device which switched channels
+ * @chandef: the new channel definition
+ *
+ * Caller must acquire wdev_lock, therefore must only be called from sleepable
+ * driver context!
+ */
+void cfg80211_ch_switch_notify(struct net_device *dev,
+			       struct cfg80211_chan_def *chandef);
+
+/**
+ * ieee80211_operating_class_to_band - convert operating class to band
+ *
+ * @operating_class: the operating class to convert
+ * @band: band pointer to fill
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_band(u8 operating_class,
+				       enum ieee80211_band *band);
+
+/**
+ * cfg80211_tdls_oper_request - request userspace to perform TDLS operation
+ * @dev: the device on which the operation is requested
+ * @peer: the MAC address of the peer device
+ * @oper: the requested TDLS operation (NL80211_TDLS_SETUP or
+ *	NL80211_TDLS_TEARDOWN)
+ * @reason_code: the reason code for teardown request
+ * @gfp: allocation flags
+ *
+ * This function is used to request userspace to perform a TDLS operation that
+ * requires knowledge of keys, i.e., link setup or teardown when the AP
+ * connection uses encryption. This is an optional mechanism for the driver to
+ * use if it can automatically determine when a TDLS link could be useful
+ * (e.g., based on traffic and signal strength for a peer).
+ */
+void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
+				enum nl80211_tdls_operation oper,
+				u16 reason_code, gfp_t gfp);
+
+/**
+ * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
+ * @rate: given rate_info to calculate bitrate from
+ *
+ * Return: 0 if the MCS index is >= 32, otherwise the bitrate in 100Kbps units.
+ */
+u32 cfg80211_calculate_bitrate(struct rate_info *rate);
+
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * Call this function only for wdevs that have no netdev assigned,
+ * e.g. P2P Devices. It removes the device from the list so that
+ * it can no longer be used. It is necessary to call this function
+ * even when cfg80211 requests the removal of the interface by
+ * calling the del_virtual_intf() callback. The function must also
+ * be called when the driver wishes to unregister the wdev, e.g.
+ * when the device is unbound from the driver.
+ *
+ * Requires the RTNL to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
+/**
+ * struct cfg80211_ft_event_params - FT Information Elements
+ * @ies: FT IEs
+ * @ies_len: length of the FT IE in bytes
+ * @target_ap: target AP's MAC address
+ * @ric_ies: RIC IE
+ * @ric_ies_len: length of the RIC IE in bytes
+ */
+struct cfg80211_ft_event_params {
+	const u8 *ies;
+	size_t ies_len;
+	const u8 *target_ap;
+	const u8 *ric_ies;
+	size_t ric_ies_len;
+};
+
+/**
+ * cfg80211_ft_event - notify userspace about FT IE and RIC IE
+ * @netdev: network device
+ * @ft_event: IE information
+ */
+void cfg80211_ft_event(struct net_device *netdev,
+		       struct cfg80211_ft_event_params *ft_event);
+
+/**
+ * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer
+ * @ies: the input IE buffer
+ * @len: the input length
+ * @attr: the attribute ID to find
+ * @buf: output buffer, can be %NULL if the data isn't needed, e.g.
+ * if the function is only called to get the needed buffer size + * @bufsize: size of the output buffer + * + * The function finds a given P2P attribute in the (vendor) IEs and + * copies its contents to the given buffer. + * + * Return: A negative error code (-%EILSEQ or -%ENOENT) if the data is + * malformed or the attribute can't be found (respectively), or the + * length of the found attribute (which can be zero). + */ +int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, + enum ieee80211_p2p_attr_id attr, + u8 *buf, unsigned int bufsize); + +/** + * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN + * @wdev: the wireless device reporting the wakeup + * @wakeup: the wakeup report + * @gfp: allocation flags + * + * This function reports that the given device woke up. If it + * caused the wakeup, report the reason(s), otherwise you may + * pass %NULL as the @wakeup parameter to advertise that something + * else caused the wakeup. + */ +void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, + struct cfg80211_wowlan_wakeup *wakeup, + gfp_t gfp); + +/** + * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver. + * + * @wdev: the wireless device for which critical protocol is stopped. + * @gfp: allocation flags + * + * This function can be called by the driver to indicate it has reverted + * operation back to normal. One reason could be that the duration given + * by .crit_proto_start() has expired. + */ +void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp); + +/** + * ieee80211_get_num_supported_channels - get number of channels device has + * @wiphy: the wiphy + * + * Return: the number of channels supported by the device. + */ +unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy); + +/** + * cfg80211_check_combinations - check interface combinations + * + * @wiphy: the wiphy + * @num_different_channels: the number of different channels we want + * to use for verification + * @radar_detect: a bitmap where each bit corresponds to a channel + * width where radar detection is needed, as in the definition of + * &struct ieee80211_iface_combination.@radar_detect_widths + * @iftype_num: array with the numbers of interfaces of each interface + * type. The index is the interface type as specified in &enum + * nl80211_iftype. + * + * This function can be called by the driver to check whether a + * combination of interfaces and their types are allowed according to + * the interface combinations. + */ +int cfg80211_check_combinations(struct wiphy *wiphy, + const int num_different_channels, + const u8 radar_detect, + const int iftype_num[NUM_NL80211_IFTYPES]); + +/** + * cfg80211_iter_combinations - iterate over matching combinations + * + * @wiphy: the wiphy + * @num_different_channels: the number of different channels we want + * to use for verification + * @radar_detect: a bitmap where each bit corresponds to a channel + * width where radar detection is needed, as in the definition of + * &struct ieee80211_iface_combination.@radar_detect_widths + * @iftype_num: array with the numbers of interfaces of each interface + * type. The index is the interface type as specified in &enum + * nl80211_iftype. + * @iter: function to call for each matching combination + * @data: pointer to pass to iter function + * + * This function can be called by the driver to check what possible + * combinations it fits in at a given moment, e.g. for channel switching + * purposes. 
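(As a sketch of the non-iterating helper documented above: before adding a second AP interface, a driver could fill an array int iftype_num[NUM_NL80211_IFTYPES] = {0}, set iftype_num[NL80211_IFTYPE_AP] = 2, and call cfg80211_check_combinations(wiphy, 1, 0, iftype_num) to verify that running two APs on a single channel is an allowed combination; the scenario is illustrative only.)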
+ */ +int cfg80211_iter_combinations(struct wiphy *wiphy, + const int num_different_channels, + const u8 radar_detect, + const int iftype_num[NUM_NL80211_IFTYPES], + void (*iter)(const struct ieee80211_iface_combination *c, + void *data), + void *data); + +/* + * cfg80211_stop_iface - trigger interface disconnection + * + * @wiphy: the wiphy + * @wdev: wireless device + * @gfp: context flags + * + * Trigger interface to be stopped as if AP was stopped, IBSS/mesh left, STA + * disconnected. + * + * Note: This doesn't need any locks and is asynchronous. + */ +void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, + gfp_t gfp); + +/** + * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy + * @wiphy: the wiphy to shut down + * + * This function shuts down all interfaces belonging to this wiphy by + * calling dev_close() (and treating non-netdev interfaces as needed). + * It shouldn't really be used unless there are some fatal device errors + * that really can't be recovered in any other way. + * + * Callers must hold the RTNL and be able to deal with callbacks into + * the driver while the function is running. + */ +void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy); + +/* Logging, debugging and troubleshooting/diagnostic helpers. */ + +/* wiphy_printk helpers, similar to dev_printk */ + +#define wiphy_printk(level, wiphy, format, args...) \ + dev_printk(level, &(wiphy)->dev, format, ##args) +#define wiphy_emerg(wiphy, format, args...) \ + dev_emerg(&(wiphy)->dev, format, ##args) +#define wiphy_alert(wiphy, format, args...) \ + dev_alert(&(wiphy)->dev, format, ##args) +#define wiphy_crit(wiphy, format, args...) \ + dev_crit(&(wiphy)->dev, format, ##args) +#define wiphy_err(wiphy, format, args...) \ + dev_err(&(wiphy)->dev, format, ##args) +#define wiphy_warn(wiphy, format, args...) \ + dev_warn(&(wiphy)->dev, format, ##args) +#define wiphy_notice(wiphy, format, args...) \ + dev_notice(&(wiphy)->dev, format, ##args) +#define wiphy_info(wiphy, format, args...) \ + dev_info(&(wiphy)->dev, format, ##args) + +#define wiphy_debug(wiphy, format, args...) \ + wiphy_printk(KERN_DEBUG, wiphy, format, ##args) + +#define wiphy_dbg(wiphy, format, args...) \ + dev_dbg(&(wiphy)->dev, format, ##args) + +#if defined(VERBOSE_DEBUG) +#define wiphy_vdbg wiphy_dbg +#else +#define wiphy_vdbg(wiphy, format, args...) \ +({ \ + if (0) \ + wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ + 0; \ +}) +#endif + +/* + * wiphy_WARN() acts like wiphy_printk(), but with the key difference + * of using a WARN/WARN_ON to get the message out, including the + * file/line information and a backtrace. + */ +#define wiphy_WARN(wiphy, format, args...) 
\ + WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); + #endif /* __NET_CFG80211_H */ diff --git a/include/net/checksum.h b/include/net/checksum.h index 07602b7fa21..87cb1903640 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -57,18 +57,33 @@ static __inline__ __wsum csum_and_copy_to_user } #endif +#ifndef HAVE_ARCH_CSUM_ADD static inline __wsum csum_add(__wsum csum, __wsum addend) { u32 res = (__force u32)csum; res += (__force u32)addend; return (__force __wsum)(res + (res < (__force u32)addend)); } +#endif static inline __wsum csum_sub(__wsum csum, __wsum addend) { return csum_add(csum, ~addend); } +static inline __sum16 csum16_add(__sum16 csum, __be16 addend) +{ + u16 res = (__force u16)csum; + + res += (__force u16)addend; + return (__force __sum16)(res + (res < (__force u16)addend)); +} + +static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) +{ + return csum16_add(csum, ~addend); +} + static inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { @@ -79,6 +94,12 @@ csum_block_add(__wsum csum, __wsum csum2, int offset) } static inline __wsum +csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) +{ + return csum_block_add(csum, csum2, offset); +} + +static inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { u32 sum = (__force u32)csum2; @@ -92,23 +113,37 @@ static inline __wsum csum_unfold(__sum16 n) return (__force __wsum)n; } +static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) +{ + return csum_partial(buff, len, sum); +} + #define CSUM_MANGLED_0 ((__force __sum16)0xffff) static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __be32 diff[] = { ~from, to }; - *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum))); + *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum))); } -static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to) +/* Implements RFC 1624 (Incremental Internet Checksum) + * 3. Discussion states : + * HC' = ~(~HC + ~m + m') + * m : old value of a 16bit field + * m' : new value of a 16bit field + */ +static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) { - csum_replace4(sum, (__force __be32)from, (__force __be32)to); + *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } struct sk_buff; -extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, - __be32 from, __be32 to, int pseudohdr); +void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, + __be32 from, __be32 to, int pseudohdr); +void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, + const __be32 *from, const __be32 *to, + int pseudohdr); static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index a6bb94530cf..a6fd939f202 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -8,7 +8,7 @@ * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * */ @@ -26,8 +26,7 @@ * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
* */ @@ -40,11 +39,14 @@ #include <linux/net.h> #include <linux/skbuff.h> #include <net/netlabel.h> +#include <net/request_sock.h> +#include <linux/atomic.h> +#include <asm/unaligned.h> /* known doi values */ #define CIPSO_V4_DOI_UNKNOWN 0x00000000 -/* tag types */ +/* standard tag types */ #define CIPSO_V4_TAG_INVALID 0 #define CIPSO_V4_TAG_RBITMAP 1 #define CIPSO_V4_TAG_ENUM 2 @@ -52,10 +54,14 @@ #define CIPSO_V4_TAG_PBITMAP 6 #define CIPSO_V4_TAG_FREEFORM 7 +/* non-standard tag types (tags > 127) */ +#define CIPSO_V4_TAG_LOCAL 128 + /* doi mapping types */ #define CIPSO_V4_MAP_UNKNOWN 0 -#define CIPSO_V4_MAP_STD 1 +#define CIPSO_V4_MAP_TRANS 1 #define CIPSO_V4_MAP_PASS 2 +#define CIPSO_V4_MAP_LOCAL 3 /* limits */ #define CIPSO_V4_MAX_REM_LVLS 255 @@ -79,10 +85,9 @@ struct cipso_v4_doi { } map; u8 tags[CIPSO_V4_TAG_MAXCNT]; - u32 valid; + atomic_t refcount; struct list_head list; struct rcu_head rcu; - struct list_head dom_list; }; /* Standard CIPSO mapping table */ @@ -127,26 +132,29 @@ extern int cipso_v4_rbm_strictvalid; */ #ifdef CONFIG_NETLABEL -int cipso_v4_doi_add(struct cipso_v4_doi *doi_def); -int cipso_v4_doi_remove(u32 doi, - struct netlbl_audit *audit_info, - void (*callback) (struct rcu_head * head)); +int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, + struct netlbl_audit *audit_info); +void cipso_v4_doi_free(struct cipso_v4_doi *doi_def); +int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info); struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi); +void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def); int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg); -int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain); -int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, - const char *domain); #else -static inline int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) +static inline int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, + struct netlbl_audit *audit_info) { return -ENOSYS; } +static inline void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) +{ + return; +} + static inline int cipso_v4_doi_remove(u32 doi, - struct netlbl_audit *audit_info, - void (*callback) (struct rcu_head * head)) + struct netlbl_audit *audit_info) { return 0; } @@ -206,10 +214,19 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway); int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr); +void cipso_v4_sock_delattr(struct sock *sk); int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr); +int cipso_v4_req_setattr(struct request_sock *req, + const struct cipso_v4_doi *doi_def, + const struct netlbl_lsm_secattr *secattr); +void cipso_v4_req_delattr(struct request_sock *req); +int cipso_v4_skbuff_setattr(struct sk_buff *skb, + const struct cipso_v4_doi *doi_def, + const struct netlbl_lsm_secattr *secattr); +int cipso_v4_skbuff_delattr(struct sk_buff *skb); int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr); -int cipso_v4_validate(unsigned char **option); +int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option); #else static inline void cipso_v4_error(struct sk_buff *skb, int error, @@ -225,21 +242,78 @@ static inline int cipso_v4_sock_setattr(struct sock *sk, return -ENOSYS; } +static inline void cipso_v4_sock_delattr(struct sock *sk) +{ +} + static inline int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { return 
-ENOSYS; } +static inline int cipso_v4_req_setattr(struct request_sock *req, + const struct cipso_v4_doi *doi_def, + const struct netlbl_lsm_secattr *secattr) +{ + return -ENOSYS; +} + +static inline void cipso_v4_req_delattr(struct request_sock *req) +{ + return; +} + +static inline int cipso_v4_skbuff_setattr(struct sk_buff *skb, + const struct cipso_v4_doi *doi_def, + const struct netlbl_lsm_secattr *secattr) +{ + return -ENOSYS; +} + +static inline int cipso_v4_skbuff_delattr(struct sk_buff *skb) +{ + return -ENOSYS; +} + static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return -ENOSYS; } -static inline int cipso_v4_validate(unsigned char **option) +static inline int cipso_v4_validate(const struct sk_buff *skb, + unsigned char **option) { - return -ENOSYS; + unsigned char *opt = *option; + unsigned char err_offset = 0; + u8 opt_len = opt[1]; + u8 opt_iter; + u8 tag_len; + + if (opt_len < 8) { + err_offset = 1; + goto out; + } + + if (get_unaligned_be32(&opt[2]) == 0) { + err_offset = 2; + goto out; + } + + for (opt_iter = 6; opt_iter < opt_len;) { + tag_len = opt[opt_iter + 1]; + if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) { + err_offset = opt_iter + 1; + goto out; + } + opt_iter += tag_len; + } + +out: + *option = opt + err_offset; + return err_offset; + } #endif /* CONFIG_NETLABEL */ diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h new file mode 100644 index 00000000000..c15d39456e1 --- /dev/null +++ b/include/net/cls_cgroup.h @@ -0,0 +1,57 @@ +/* + * cls_cgroup.h Control Group Classifier + * + * Authors: Thomas Graf <tgraf@suug.ch> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _NET_CLS_CGROUP_H +#define _NET_CLS_CGROUP_H + +#include <linux/cgroup.h> +#include <linux/hardirq.h> +#include <linux/rcupdate.h> +#include <net/sock.h> + +#ifdef CONFIG_CGROUP_NET_CLASSID +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; +}; + +struct cgroup_cls_state *task_cls_state(struct task_struct *p); + +static inline u32 task_cls_classid(struct task_struct *p) +{ + u32 classid; + + if (in_interrupt()) + return 0; + + rcu_read_lock(); + classid = container_of(task_css(p, net_cls_cgrp_id), + struct cgroup_cls_state, css)->classid; + rcu_read_unlock(); + + return classid; +} + +static inline void sock_update_classid(struct sock *sk) +{ + u32 classid; + + classid = task_cls_classid(current); + if (classid != sk->sk_classid) + sk->sk_classid = classid; +} +#else /* !CONFIG_CGROUP_NET_CLASSID */ +static inline void sock_update_classid(struct sock *sk) +{ +} +#endif /* CONFIG_CGROUP_NET_CLASSID */ +#endif /* _NET_CLS_CGROUP_H */ diff --git a/include/net/codel.h b/include/net/codel.h new file mode 100644 index 00000000000..fe0eab32ce7 --- /dev/null +++ b/include/net/codel.h @@ -0,0 +1,355 @@ +#ifndef __NET_SCHED_CODEL_H +#define __NET_SCHED_CODEL_H + +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> + * Copyright (C) 2012 Michael D. 
Taht <dave.taht@bufferbloat.net> + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#include <linux/types.h> +#include <linux/ktime.h> +#include <linux/skbuff.h> +#include <net/pkt_sched.h> +#include <net/inet_ecn.h> + +/* Controlling Queue Delay (CoDel) algorithm + * ========================================= + * Source : Kathleen Nichols and Van Jacobson + * http://queue.acm.org/detail.cfm?id=2209336 + * + * Implemented on linux by Dave Taht and Eric Dumazet + */ + + +/* CoDel uses a 1024 nsec clock, encoded in u32 + * This gives a range of 2199 seconds, because of signed compares + */ +typedef u32 codel_time_t; +typedef s32 codel_tdiff_t; +#define CODEL_SHIFT 10 +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT) + +static inline codel_time_t codel_get_time(void) +{ + u64 ns = ktime_to_ns(ktime_get()); + + return ns >> CODEL_SHIFT; +} + +/* Dealing with timer wrapping, according to RFC 1982, as desc in wikipedia: + * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution + * codel_time_after(a,b) returns true if the time a is after time b. 
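(Worked example: just after the 32-bit clock wraps, a = 5 and b = 0xfffffff0 give (s32)(a - b) = 21 > 0, so codel_time_after(a, b) is true even though a is numerically smaller than b.)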
+ */ +#define codel_time_after(a, b) \ + (typecheck(codel_time_t, a) && \ + typecheck(codel_time_t, b) && \ + ((s32)((a) - (b)) > 0)) +#define codel_time_before(a, b) codel_time_after(b, a) + +#define codel_time_after_eq(a, b) \ + (typecheck(codel_time_t, a) && \ + typecheck(codel_time_t, b) && \ + ((s32)((a) - (b)) >= 0)) +#define codel_time_before_eq(a, b) codel_time_after_eq(b, a) + +/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ +struct codel_skb_cb { + codel_time_t enqueue_time; +}; + +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; +} + +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) +{ + return get_codel_cb(skb)->enqueue_time; +} + +static void codel_set_enqueue_time(struct sk_buff *skb) +{ + get_codel_cb(skb)->enqueue_time = codel_get_time(); +} + +static inline u32 codel_time_to_us(codel_time_t val) +{ + u64 valns = ((u64)val << CODEL_SHIFT); + + do_div(valns, NSEC_PER_USEC); + return (u32)valns; +} + +/** + * struct codel_params - contains codel parameters + * @target: target queue size (in time units) + * @interval: width of moving time window + * @ecn: is Explicit Congestion Notification enabled + */ +struct codel_params { + codel_time_t target; + codel_time_t interval; + bool ecn; +}; + +/** + * struct codel_vars - contains codel variables + * @count: how many drops we've done since the last time we + * entered dropping state + * @lastcount: count at entry to dropping state + * @dropping: set to true if in dropping state + * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1 + * @first_above_time: when we went (or will go) continuously above target + * for interval + * @drop_next: time to drop next packet, or when we dropped last + * @ldelay: sojourn time of last dequeued packet + */ +struct codel_vars { + u32 count; + u32 lastcount; + bool dropping; + u16 rec_inv_sqrt; + codel_time_t first_above_time; + codel_time_t drop_next; + codel_time_t ldelay; +}; + +#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */ +/* needed shift to get a Q0.32 number from rec_inv_sqrt */ +#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS) + +/** + * struct codel_stats - contains codel shared variables and stats + * @maxpacket: largest packet we've seen so far + * @drop_count: temp count of dropped packets in dequeue() + * ecn_mark: number of packets we ECN marked instead of dropping + */ +struct codel_stats { + u32 maxpacket; + u32 drop_count; + u32 ecn_mark; +}; + +static void codel_params_init(struct codel_params *params) +{ + params->interval = MS2TIME(100); + params->target = MS2TIME(5); + params->ecn = false; +} + +static void codel_vars_init(struct codel_vars *vars) +{ + memset(vars, 0, sizeof(*vars)); +} + +static void codel_stats_init(struct codel_stats *stats) +{ + stats->maxpacket = 256; +} + +/* + * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) + * + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 + */ +static void codel_Newton_step(struct codel_vars *vars) +{ + u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; + u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; + u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); + + val >>= 2; /* avoid overflow in following multiply */ + val = (val * invsqrt) >> (32 - 2 + 
1); + + vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; +} + +/* + * CoDel control_law is t + interval/sqrt(count) + * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid + * both sqrt() and divide operation. + */ +static codel_time_t codel_control_law(codel_time_t t, + codel_time_t interval, + u32 rec_inv_sqrt) +{ + return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); +} + +static bool codel_should_drop(const struct sk_buff *skb, + struct Qdisc *sch, + struct codel_vars *vars, + struct codel_params *params, + struct codel_stats *stats, + codel_time_t now) +{ + bool ok_to_drop; + + if (!skb) { + vars->first_above_time = 0; + return false; + } + + vars->ldelay = now - codel_get_enqueue_time(skb); + sch->qstats.backlog -= qdisc_pkt_len(skb); + + if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) + stats->maxpacket = qdisc_pkt_len(skb); + + if (codel_time_before(vars->ldelay, params->target) || + sch->qstats.backlog <= stats->maxpacket) { + /* went below - stay below for at least interval */ + vars->first_above_time = 0; + return false; + } + ok_to_drop = false; + if (vars->first_above_time == 0) { + /* just went above from below. If we stay above + * for at least interval we'll say it's ok to drop + */ + vars->first_above_time = now + params->interval; + } else if (codel_time_after(now, vars->first_above_time)) { + ok_to_drop = true; + } + return ok_to_drop; +} + +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, + struct Qdisc *sch); + +static struct sk_buff *codel_dequeue(struct Qdisc *sch, + struct codel_params *params, + struct codel_vars *vars, + struct codel_stats *stats, + codel_skb_dequeue_t dequeue_func) +{ + struct sk_buff *skb = dequeue_func(vars, sch); + codel_time_t now; + bool drop; + + if (!skb) { + vars->dropping = false; + return skb; + } + now = codel_get_time(); + drop = codel_should_drop(skb, sch, vars, params, stats, now); + if (vars->dropping) { + if (!drop) { + /* sojourn time below target - leave dropping state */ + vars->dropping = false; + } else if (codel_time_after_eq(now, vars->drop_next)) { + /* It's time for the next drop. Drop the current + * packet and dequeue the next. The dequeue might + * take us out of dropping state. + * If not, schedule the next drop. + * A large backlog might result in drop rates so high + * that the next drop should happen now, + * hence the while loop. 
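(Numerically, with the default 100 ms interval the spacing between drops follows interval/sqrt(count): 100 ms after the first drop, about 71 ms after the second, 50 ms after the fourth, and so on, so a persistent backlog can leave drop_next already in the past by the time we get here, which is what the loop below handles.)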
+ */ + while (vars->dropping && + codel_time_after_eq(now, vars->drop_next)) { + vars->count++; /* dont care of possible wrap + * since there is no more divide + */ + codel_Newton_step(vars); + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + goto end; + } + qdisc_drop(skb, sch); + stats->drop_count++; + skb = dequeue_func(vars, sch); + if (!codel_should_drop(skb, sch, + vars, params, stats, now)) { + /* leave dropping state */ + vars->dropping = false; + } else { + /* and schedule the next drop */ + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + } + } + } + } else if (drop) { + u32 delta; + + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + } else { + qdisc_drop(skb, sch); + stats->drop_count++; + + skb = dequeue_func(vars, sch); + drop = codel_should_drop(skb, sch, vars, params, + stats, now); + } + vars->dropping = true; + /* if min went above target close to when we last went below it + * assume that the drop rate that controlled the queue on the + * last cycle is a good starting point to control it now. + */ + delta = vars->count - vars->lastcount; + if (delta > 1 && + codel_time_before(now - vars->drop_next, + 16 * params->interval)) { + vars->count = delta; + /* we dont care if rec_inv_sqrt approximation + * is not very precise : + * Next Newton steps will correct it quadratically. + */ + codel_Newton_step(vars); + } else { + vars->count = 1; + vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; + } + vars->lastcount = vars->count; + vars->drop_next = codel_control_law(now, params->interval, + vars->rec_inv_sqrt); + } +end: + return skb; +} +#endif diff --git a/include/net/compat.h b/include/net/compat.h index 406db242f73..3b603b199c0 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -18,26 +18,52 @@ struct compat_msghdr { compat_uint_t msg_flags; }; +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + struct compat_cmsghdr { compat_size_t cmsg_len; compat_int_t cmsg_level; compat_int_t cmsg_type; }; -extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *); -extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *); +int compat_sock_get_timestamp(struct sock *, struct timeval __user *); +int compat_sock_get_timestampns(struct sock *, struct timespec __user *); #else /* defined(CONFIG_COMPAT) */ -#define compat_msghdr msghdr /* to avoid compiler warnings */ +/* + * To avoid compiler warnings: + */ +#define compat_msghdr msghdr +#define compat_mmsghdr mmsghdr #endif /* defined(CONFIG_COMPAT) */ -extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); -extern int verify_compat_iovec(struct msghdr *, struct iovec *, char *, int); -extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned); -extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned); -extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *); -extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); +int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); +int verify_compat_iovec(struct msghdr *, struct iovec *, + struct sockaddr_storage *, int); +asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, + unsigned int); +asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, + unsigned int, 
unsigned int); +asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *, + unsigned int); +asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *, + unsigned int, unsigned int, + struct compat_timespec __user *); +asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, + int __user *); +int put_cmsg_compat(struct msghdr*, int, int, int, void *); + +int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, + unsigned char *, int); -extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); +int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int, + int (*)(struct sock *, int, int, char __user *, + unsigned int)); +int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *, + int (*)(struct sock *, int, int, char __user *, + int __user *)); #endif /* NET_COMPAT_H */ diff --git a/include/net/datalink.h b/include/net/datalink.h index deb7ca75db4..93cb18f729b 100644 --- a/include/net/datalink.h +++ b/include/net/datalink.h @@ -15,4 +15,6 @@ struct datalink_proto { struct list_head node; }; +struct datalink_proto *make_EII_client(void); +void destroy_EII_client(struct datalink_proto *dl); #endif diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h new file mode 100644 index 00000000000..aec07c8a660 --- /dev/null +++ b/include/net/dcbevent.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2010, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * Author: John Fastabend <john.r.fastabend@intel.com> + */ + +#ifndef _DCB_EVENT_H +#define _DCB_EVENT_H + +enum dcbevent_notif_type { + DCB_APP_EVENT = 1, +}; + +#ifdef CONFIG_DCB +int register_dcbevent_notifier(struct notifier_block *nb); +int unregister_dcbevent_notifier(struct notifier_block *nb); +int call_dcbevent_notifiers(unsigned long val, void *v); +#else +static inline int +register_dcbevent_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_dcbevent_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int call_dcbevent_notifiers(unsigned long val, void *v) +{ + return 0; +} +#endif /* CONFIG_DCB */ + +#endif diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h new file mode 100644 index 00000000000..a975edf21b2 --- /dev/null +++ b/include/net/dcbnl.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2008, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * Author: Lucy Liu <lucy.liu@intel.com> + */ + +#ifndef __NET_DCBNL_H__ +#define __NET_DCBNL_H__ + +#include <linux/dcbnl.h> + +struct dcb_app_type { + int ifindex; + struct dcb_app app; + struct list_head list; + u8 dcbx; +}; + +int dcb_setapp(struct net_device *, struct dcb_app *); +u8 dcb_getapp(struct net_device *, struct dcb_app *); +int dcb_ieee_setapp(struct net_device *, struct dcb_app *); +int dcb_ieee_delapp(struct net_device *, struct dcb_app *); +u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *); + +int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, + u32 seq, u32 pid); +int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, + u32 seq, u32 pid); + +/* + * Ops struct for the netlink callbacks. Used by DCB-enabled drivers through + * the netdevice struct. + */ +struct dcbnl_rtnl_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getets) (struct net_device *, struct ieee_ets *); + int (*ieee_setets) (struct net_device *, struct ieee_ets *); + int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); + int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); + int (*ieee_getapp) (struct net_device *, struct dcb_app *); + int (*ieee_setapp) (struct net_device *, struct dcb_app *); + int (*ieee_delapp) (struct net_device *, struct dcb_app *); + int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *); + int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *); + + /* CEE std */ + u8 (*getstate)(struct net_device *); + u8 (*setstate)(struct net_device *, u8); + void (*getpermhwaddr)(struct net_device *, u8 *); + void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgtx)(struct net_device *, int, u8); + void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgrx)(struct net_device *, int, u8); + void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); + void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); + void (*setpfccfg)(struct net_device *, int, u8); + void (*getpfccfg)(struct net_device *, int, u8 *); + u8 (*setall)(struct net_device *); + u8 (*getcap)(struct net_device *, int, u8 *); + int (*getnumtcs)(struct net_device *, int, u8 *); + int (*setnumtcs)(struct net_device *, int, u8); + u8 (*getpfcstate)(struct net_device *); + void (*setpfcstate)(struct net_device *, u8); + void (*getbcncfg)(struct net_device *, int, u32 *); + void (*setbcncfg)(struct net_device *, int, u32); + void (*getbcnrp)(struct net_device *, int, u8 *); + void (*setbcnrp)(struct net_device *, int, u8); + u8 (*setapp)(struct net_device *, u8, u16, u8); + u8 (*getapp)(struct net_device *, u8, u16); + u8 (*getfeatcfg)(struct net_device *, int, u8 *); + u8 (*setfeatcfg)(struct net_device *, int, u8); + + /* DCBX configuration */ + u8 (*getdcbx)(struct net_device *); + u8 (*setdcbx)(struct net_device *, u8); + + /* peer apps */ + int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, + u16 *); + int (*peer_getapptable)(struct net_device *, struct dcb_app *); + + /* CEE peer */ + int (*cee_peer_getpg) (struct net_device *, struct cee_pg *); + int 
(*cee_peer_getpfc) (struct net_device *, struct cee_pfc *); +}; + +#endif /* __NET_DCBNL_H__ */ diff --git a/include/net/dn.h b/include/net/dn.h index 627778384c8..913b73d239f 100644 --- a/include/net/dn.h +++ b/include/net/dn.h @@ -3,10 +3,9 @@ #include <linux/dn.h> #include <net/sock.h> +#include <net/flow.h> #include <asm/byteorder.h> - -#define dn_ntohs(x) le16_to_cpu(x) -#define dn_htons(x) cpu_to_le16(x) +#include <asm/unaligned.h> struct dn_scp /* Session Control Port */ { @@ -175,7 +174,7 @@ struct dn_skb_cb { static inline __le16 dn_eth2dn(unsigned char *ethaddr) { - return dn_htons(ethaddr[4] | (ethaddr[5] << 8)); + return get_unaligned((__le16 *)(ethaddr + 4)); } static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr) @@ -185,7 +184,7 @@ static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr) static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr) { - __u16 a = dn_ntohs(addr); + __u16 a = le16_to_cpu(addr); ethaddr[0] = 0xAA; ethaddr[1] = 0x00; ethaddr[2] = 0x04; @@ -194,30 +193,34 @@ static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr) ethaddr[5] = (__u8)(a >> 8); } -static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp) +static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp) { - fl->uli_u.dnports.sport = scp->addrloc; - fl->uli_u.dnports.dport = scp->addrrem; + fld->fld_sport = scp->addrloc; + fld->fld_dport = scp->addrrem; } -extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu); +unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu); +void dn_register_sysctl(void); +void dn_unregister_sysctl(void); #define DN_MENUVER_ACC 0x01 #define DN_MENUVER_USR 0x02 #define DN_MENUVER_PRX 0x04 #define DN_MENUVER_UIC 0x08 -extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr); -extern struct sock *dn_find_by_skb(struct sk_buff *skb); +struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr); +struct sock *dn_find_by_skb(struct sk_buff *skb); #define DN_ASCBUF_LEN 9 -extern char *dn_addr2asc(__u16, char *); -extern int dn_destroy_timer(struct sock *sk); +char *dn_addr2asc(__u16, char *); +int dn_destroy_timer(struct sock *sk); -extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type); -extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type); +int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, + unsigned char type); +int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, + unsigned char *type); -extern void dn_start_slow_timer(struct sock *sk); -extern void dn_stop_slow_timer(struct sock *sk); +void dn_start_slow_timer(struct sock *sk); +void dn_stop_slow_timer(struct sock *sk); extern __le16 decnet_address; extern int decnet_debug_level; @@ -227,7 +230,7 @@ extern int decnet_di_count; extern int decnet_dr_count; extern int decnet_no_fc_max_cwnd; -extern int sysctl_decnet_mem[3]; +extern long sysctl_decnet_mem[3]; extern int sysctl_decnet_wmem[3]; extern int sysctl_decnet_rmem[3]; diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h index cee46821dc5..197886cd7bd 100644 --- a/include/net/dn_dev.h +++ b/include/net/dn_dev.h @@ -5,13 +5,14 @@ struct dn_dev; struct dn_ifaddr { - struct dn_ifaddr *ifa_next; + struct dn_ifaddr __rcu *ifa_next; struct dn_dev *ifa_dev; __le16 ifa_local; __le16 ifa_address; - __u8 ifa_flags; + __u32 ifa_flags; __u8 ifa_scope; char ifa_label[IFNAMSIZ]; + struct rcu_head rcu; }; #define DN_DEV_S_RU 0 /* Run - 
working normally */ @@ -75,7 +76,6 @@ struct dn_dev_parms { unsigned long t3; /* Default value of t3 */ int priority; /* Priority to be a router */ char *name; /* Name for sysctl */ - int ctl_name; /* Index for sysctl */ int (*up)(struct net_device *); void (*down)(struct net_device *); void (*timer3)(struct net_device *, struct dn_ifaddr *ifa); @@ -84,7 +84,7 @@ struct dn_dev_parms { struct dn_dev { - struct dn_ifaddr *ifa_list; + struct dn_ifaddr __rcu *ifa_list; struct net_device *dev; struct dn_dev_parms parms; char use_long; @@ -97,16 +97,14 @@ struct dn_dev { unsigned long uptime; /* Time device went up in jiffies */ }; -struct dn_short_packet -{ +struct dn_short_packet { __u8 msgflg; __le16 dstnode; __le16 srcnode; __u8 forward; -} __attribute__((packed)); +} __packed; -struct dn_long_packet -{ +struct dn_long_packet { __u8 msgflg; __u8 d_area; __u8 d_subarea; @@ -118,12 +116,11 @@ struct dn_long_packet __u8 visit_ct; __u8 s_class; __u8 pt; -} __attribute__((packed)); +} __packed; /*------------------------- DRP - Routing messages ---------------------*/ -struct endnode_hello_message -{ +struct endnode_hello_message { __u8 msgflg; __u8 tiver[3]; __u8 id[6]; @@ -136,10 +133,9 @@ struct endnode_hello_message __u8 mpd; __u8 datalen; __u8 data[2]; -} __attribute__((packed)); +} __packed; -struct rtnode_hello_message -{ +struct rtnode_hello_message { __u8 msgflg; __u8 tiver[3]; __u8 id[6]; @@ -149,46 +145,54 @@ struct rtnode_hello_message __u8 area; __le16 timer; __u8 mpd; -} __attribute__((packed)); +} __packed; -extern void dn_dev_init(void); -extern void dn_dev_cleanup(void); +void dn_dev_init(void); +void dn_dev_cleanup(void); -extern int dn_dev_ioctl(unsigned int cmd, void __user *arg); +int dn_dev_ioctl(unsigned int cmd, void __user *arg); -extern void dn_dev_devices_off(void); -extern void dn_dev_devices_on(void); +void dn_dev_devices_off(void); +void dn_dev_devices_on(void); -extern void dn_dev_init_pkt(struct sk_buff *skb); -extern void dn_dev_veri_pkt(struct sk_buff *skb); -extern void dn_dev_hello(struct sk_buff *skb); +void dn_dev_init_pkt(struct sk_buff *skb); +void dn_dev_veri_pkt(struct sk_buff *skb); +void dn_dev_hello(struct sk_buff *skb); -extern void dn_dev_up(struct net_device *); -extern void dn_dev_down(struct net_device *); +void dn_dev_up(struct net_device *); +void dn_dev_down(struct net_device *); -extern int dn_dev_set_default(struct net_device *dev, int force); -extern struct net_device *dn_dev_get_default(void); -extern int dn_dev_bind_default(__le16 *addr); +int dn_dev_set_default(struct net_device *dev, int force); +struct net_device *dn_dev_get_default(void); +int dn_dev_bind_default(__le16 *addr); -extern int register_dnaddr_notifier(struct notifier_block *nb); -extern int unregister_dnaddr_notifier(struct notifier_block *nb); +int register_dnaddr_notifier(struct notifier_block *nb); +int unregister_dnaddr_notifier(struct notifier_block *nb); static inline int dn_dev_islocal(struct net_device *dev, __le16 addr) { - struct dn_dev *dn_db = dev->dn_ptr; + struct dn_dev *dn_db; struct dn_ifaddr *ifa; + int res = 0; + rcu_read_lock(); + dn_db = rcu_dereference(dev->dn_ptr); if (dn_db == NULL) { printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n"); - return 0; + goto out; } - for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) - if ((addr ^ ifa->ifa_local) == 0) - return 1; - - return 0; + for (ifa = rcu_dereference(dn_db->ifa_list); + ifa != NULL; + ifa = rcu_dereference(ifa->ifa_next)) + if ((addr ^ ifa->ifa_local) == 0) { + res = 1; + break; 
+ } +out: + rcu_read_unlock(); + return res; } #endif /* _NET_DN_DEV_H */ diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index 30125119c95..f2ca135ddcc 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -1,25 +1,9 @@ #ifndef _NET_DN_FIB_H #define _NET_DN_FIB_H -/* WARNING: The ordering of these elements must match ordering - * of RTA_* rtnetlink attribute numbers. - */ -struct dn_kern_rta -{ - void *rta_dst; - void *rta_src; - int *rta_iif; - int *rta_oif; - void *rta_gw; - u32 *rta_priority; - void *rta_prefsrc; - struct rtattr *rta_mx; - struct rtattr *rta_mp; - unsigned char *rta_protoinfo; - u32 *rta_flow; - struct rta_cacheinfo *rta_ci; - struct rta_session *rta_sess; -}; +#include <linux/netlink.h> + +extern const struct nla_policy rtm_dn_policy[]; struct dn_fib_res { struct fib_rule *r; @@ -32,7 +16,7 @@ struct dn_fib_res { struct dn_fib_nh { struct net_device *nh_dev; - unsigned nh_flags; + unsigned int nh_flags; unsigned char nh_scope; int nh_weight; int nh_power; @@ -46,15 +30,11 @@ struct dn_fib_info { int fib_treeref; atomic_t fib_clntref; int fib_dead; - unsigned fib_flags; + unsigned int fib_flags; int fib_protocol; __le16 fib_prefsrc; __u32 fib_priority; __u32 fib_metrics[RTAX_MAX]; -#define dn_fib_mtu fib_metrics[RTAX_MTU-1] -#define dn_fib_window fib_metrics[RTAX_WINDOW-1] -#define dn_fib_rtt fib_metrics[RTAX_RTT-1] -#define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1] int fib_nhs; int fib_power; struct dn_fib_nh fib_nh[0]; @@ -98,12 +78,12 @@ struct dn_fib_table { u32 n; int (*insert)(struct dn_fib_table *t, struct rtmsg *r, - struct dn_kern_rta *rta, struct nlmsghdr *n, + struct nlattr *attrs[], struct nlmsghdr *n, struct netlink_skb_parms *req); int (*delete)(struct dn_fib_table *t, struct rtmsg *r, - struct dn_kern_rta *rta, struct nlmsghdr *n, + struct nlattr *attrs[], struct nlmsghdr *n, struct netlink_skb_parms *req); - int (*lookup)(struct dn_fib_table *t, const struct flowi *fl, + int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld, struct dn_fib_res *res); int (*flush)(struct dn_fib_table *t); int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb); @@ -115,42 +95,38 @@ struct dn_fib_table { /* * dn_fib.c */ -extern void dn_fib_init(void); -extern void dn_fib_cleanup(void); - -extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg); -extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, - struct dn_kern_rta *rta, - const struct nlmsghdr *nlh, int *errp); -extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, - const struct flowi *fl, - struct dn_fib_res *res); -extern void dn_fib_release_info(struct dn_fib_info *fi); -extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type); -extern void dn_fib_flush(void); -extern void dn_fib_select_multipath(const struct flowi *fl, - struct dn_fib_res *res); +void dn_fib_init(void); +void dn_fib_cleanup(void); + +int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, + struct nlattr *attrs[], + const struct nlmsghdr *nlh, int *errp); +int dn_fib_semantic_match(int type, struct dn_fib_info *fi, + const struct flowidn *fld, struct dn_fib_res *res); +void dn_fib_release_info(struct dn_fib_info *fi); +void dn_fib_flush(void); +void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res); /* * dn_tables.c */ -extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat); -extern struct 
dn_fib_table *dn_fib_empty_table(void); -extern void dn_fib_table_init(void); -extern void dn_fib_table_cleanup(void); +struct dn_fib_table *dn_fib_get_table(u32 n, int creat); +struct dn_fib_table *dn_fib_empty_table(void); +void dn_fib_table_init(void); +void dn_fib_table_cleanup(void); /* * dn_rules.c */ -extern void dn_fib_rules_init(void); -extern void dn_fib_rules_cleanup(void); -extern unsigned dnet_addr_type(__le16 addr); -extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res); +void dn_fib_rules_init(void); +void dn_fib_rules_cleanup(void); +unsigned int dnet_addr_type(__le16 addr); +int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res); -extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); +int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern void dn_fib_free_info(struct dn_fib_info *fi); +void dn_fib_free_info(struct dn_fib_info *fi); static inline void dn_fib_info_put(struct dn_fib_info *fi) { @@ -181,9 +157,9 @@ static inline void dn_fib_res_put(struct dn_fib_res *res) static inline __le16 dnet_make_mask(int n) { - if (n) - return dn_htons(~((1<<(16-n))-1)); - return 0; + if (n) + return cpu_to_le16(~((1 << (16 - n)) - 1)); + return cpu_to_le16(0); } #endif /* _NET_DN_FIB_H */ diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h index 4cb4ae7fb81..fac4e3f4a6d 100644 --- a/include/net/dn_neigh.h +++ b/include/net/dn_neigh.h @@ -16,12 +16,12 @@ struct dn_neigh { __u8 priority; }; -extern void dn_neigh_init(void); -extern void dn_neigh_cleanup(void); -extern int dn_neigh_router_hello(struct sk_buff *skb); -extern int dn_neigh_endnode_hello(struct sk_buff *skb); -extern void dn_neigh_pointopoint_hello(struct sk_buff *skb); -extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); +void dn_neigh_init(void); +void dn_neigh_cleanup(void); +int dn_neigh_router_hello(struct sk_buff *skb); +int dn_neigh_endnode_hello(struct sk_buff *skb); +void dn_neigh_pointopoint_hello(struct sk_buff *skb); +int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); extern struct neigh_table dn_neigh_table; diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h index 96e816b6974..3a3e33d1845 100644 --- a/include/net/dn_nsp.h +++ b/include/net/dn_nsp.h @@ -15,29 +15,32 @@ *******************************************************************************/ /* dn_nsp.c functions prototyping */ -extern void dn_nsp_send_data_ack(struct sock *sk); -extern void dn_nsp_send_oth_ack(struct sock *sk); -extern void dn_nsp_delayed_ack(struct sock *sk); -extern void dn_send_conn_ack(struct sock *sk); -extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp); -extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, - unsigned short reason, gfp_t gfp); -extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, - unsigned short reason); -extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); -extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags); - -extern void dn_nsp_output(struct sock *sk); -extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); -extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob); -extern unsigned long dn_nsp_persist(struct sock *sk); -extern int dn_nsp_xmit_timeout(struct sock *sk); - -extern int dn_nsp_rx(struct sk_buff *); -extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); - -extern struct sk_buff 
*dn_alloc_skb(struct sock *sk, int size, gfp_t pri); -extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); +void dn_nsp_send_data_ack(struct sock *sk); +void dn_nsp_send_oth_ack(struct sock *sk); +void dn_nsp_delayed_ack(struct sock *sk); +void dn_send_conn_ack(struct sock *sk); +void dn_send_conn_conf(struct sock *sk, gfp_t gfp); +void dn_nsp_send_disc(struct sock *sk, unsigned char type, + unsigned short reason, gfp_t gfp); +void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, + unsigned short reason); +void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); +void dn_nsp_send_conninit(struct sock *sk, unsigned char flags); + +void dn_nsp_output(struct sock *sk); +int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, + struct sk_buff_head *q, unsigned short acknum); +void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, + int oob); +unsigned long dn_nsp_persist(struct sock *sk); +int dn_nsp_xmit_timeout(struct sock *sk); + +int dn_nsp_rx(struct sk_buff *); +int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); + +struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); +struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, + long timeo, int *err); #define NSP_REASON_OK 0 /* No error */ #define NSP_REASON_NR 1 /* No resources */ @@ -70,47 +73,41 @@ extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int nobl /* Data Messages (data segment/interrupt/link service) */ -struct nsp_data_seg_msg -{ +struct nsp_data_seg_msg { __u8 msgflg; __le16 dstaddr; __le16 srcaddr; -} __attribute__((packed)); +} __packed; -struct nsp_data_opt_msg -{ +struct nsp_data_opt_msg { __le16 acknum; __le16 segnum; __le16 lsflgs; -} __attribute__((packed)); +} __packed; -struct nsp_data_opt_msg1 -{ +struct nsp_data_opt_msg1 { __le16 acknum; __le16 segnum; -} __attribute__((packed)); +} __packed; /* Acknowledgment Message (data/other data) */ -struct nsp_data_ack_msg -{ +struct nsp_data_ack_msg { __u8 msgflg; __le16 dstaddr; __le16 srcaddr; __le16 acknum; -} __attribute__((packed)); +} __packed; /* Connect Acknowledgment Message */ -struct nsp_conn_ack_msg -{ +struct nsp_conn_ack_msg { __u8 msgflg; __le16 dstaddr; -} __attribute__((packed)); +} __packed; /* Connect Initiate/Retransmit Initiate/Connect Confirm */ -struct nsp_conn_init_msg -{ +struct nsp_conn_init_msg { __u8 msgflg; #define NSP_CI 0x18 /* Connect Initiate */ #define NSP_RCI 0x68 /* Retrans. Conn Init */ @@ -123,27 +120,25 @@ struct nsp_conn_init_msg #define NSP_FC_MASK 0x0c /* FC type mask */ __u8 info; __le16 segsize; -} __attribute__((packed)); +} __packed; /* Disconnect Initiate/Disconnect Confirm */ -struct nsp_disconn_init_msg -{ +struct nsp_disconn_init_msg { __u8 msgflg; __le16 dstaddr; __le16 srcaddr; __le16 reason; -} __attribute__((packed)); +} __packed; -struct srcobj_fmt -{ +struct srcobj_fmt { __u8 format; __u8 task; __le16 grpcode; __le16 usrcode; __u8 dlen; -} __attribute__((packed)); +} __packed; /* * A collection of functions for manipulating the sequence diff --git a/include/net/dn_route.h b/include/net/dn_route.h index 60c9f22d869..55df9939bca 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -15,10 +15,13 @@ GNU General Public License for more details. 
*******************************************************************************/ -extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); -extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); -extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern void dn_rt_cache_flush(int delay); +struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); +int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *, + struct sock *sk, int flags); +int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); +void dn_rt_cache_flush(int delay); +int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); /* Masks for flags field */ #define DN_RT_F_PID 0x07 /* Mask for packet type */ @@ -65,11 +68,11 @@ extern void dn_rt_cache_flush(int delay); * packets to the originating host. */ struct dn_route { - union { - struct dst_entry dst; - } u; + struct dst_entry dst; - struct flowi fl; + struct neighbour *n; + + struct flowidn fld; __le16 rt_saddr; __le16 rt_daddr; @@ -78,12 +81,22 @@ struct dn_route { __le16 rt_src_map; __le16 rt_dst_map; - unsigned rt_flags; - unsigned rt_type; + unsigned int rt_flags; + unsigned int rt_type; }; -extern void dn_route_init(void); -extern void dn_route_cleanup(void); +static inline bool dn_is_input_route(struct dn_route *rt) +{ + return rt->fld.flowidn_iif != 0; +} + +static inline bool dn_is_output_route(struct dn_route *rt) +{ + return rt->fld.flowidn_iif == 0; +} + +void dn_route_init(void); +void dn_route_cleanup(void); #include <net/sock.h> #include <linux/if_arp.h> diff --git a/include/net/dsa.h b/include/net/dsa.h new file mode 100644 index 00000000000..6efce384451 --- /dev/null +++ b/include/net/dsa.h @@ -0,0 +1,206 @@ +/* + * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_NET_DSA_H +#define __LINUX_NET_DSA_H + +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/timer.h> +#include <linux/workqueue.h> + +#define DSA_MAX_SWITCHES 4 +#define DSA_MAX_PORTS 12 + +struct dsa_chip_data { + /* + * How to access the switch configuration registers. + */ + struct device *mii_bus; + int sw_addr; + + /* + * The names of the switch's ports. Use "cpu" to + * designate the switch port that the cpu is connected to, + * "dsa" to indicate that this port is a DSA link to + * another switch, NULL to indicate the port is unused, + * or any other string to indicate this is a physical port. + */ + char *port_names[DSA_MAX_PORTS]; + + /* + * An array (with nr_chips elements) of which element [a] + * indicates which port on this switch should be used to + * send packets to that are destined for switch a. Can be + * NULL if there is only one switch chip. + */ + s8 *rtable; +}; + +struct dsa_platform_data { + /* + * Reference to a Linux network interface that connects + * to the root switch chip of the tree. + */ + struct device *netdev; + + /* + * Info structs describing each of the switch chips + * connected via this network interface. 
+ */ + int nr_chips; + struct dsa_chip_data *chip; +}; + +struct dsa_switch_tree { + /* + * Configuration data for the platform device that owns + * this dsa switch tree instance. + */ + struct dsa_platform_data *pd; + + /* + * Reference to network device to use, and which tagging + * protocol to use. + */ + struct net_device *master_netdev; + __be16 tag_protocol; + + /* + * The switch and port to which the CPU is attached. + */ + s8 cpu_switch; + s8 cpu_port; + + /* + * Link state polling. + */ + int link_poll_needed; + struct work_struct link_poll_work; + struct timer_list link_poll_timer; + + /* + * Data for the individual switch chips. + */ + struct dsa_switch *ds[DSA_MAX_SWITCHES]; +}; + +struct dsa_switch { + /* + * Parent switch tree, and switch index. + */ + struct dsa_switch_tree *dst; + int index; + + /* + * Configuration data for this switch. + */ + struct dsa_chip_data *pd; + + /* + * The used switch driver. + */ + struct dsa_switch_driver *drv; + + /* + * Reference to mii bus to use. + */ + struct mii_bus *master_mii_bus; + + /* + * Slave mii_bus and devices for the individual ports. + */ + u32 dsa_port_mask; + u32 phys_port_mask; + struct mii_bus *slave_mii_bus; + struct net_device *ports[DSA_MAX_PORTS]; +}; + +static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) +{ + return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); +} + +static inline u8 dsa_upstream_port(struct dsa_switch *ds) +{ + struct dsa_switch_tree *dst = ds->dst; + + /* + * If this is the root switch (i.e. the switch that connects + * to the CPU), return the cpu port number on this switch. + * Else return the (DSA) port number that connects to the + * switch that is one hop closer to the cpu. + */ + if (dst->cpu_switch == ds->index) + return dst->cpu_port; + else + return ds->pd->rtable[dst->cpu_switch]; +} + +struct dsa_switch_driver { + struct list_head list; + + __be16 tag_protocol; + int priv_size; + + /* + * Probing and setup. + */ + char *(*probe)(struct mii_bus *bus, int sw_addr); + int (*setup)(struct dsa_switch *ds); + int (*set_addr)(struct dsa_switch *ds, u8 *addr); + + /* + * Access to the switch's PHY registers. + */ + int (*phy_read)(struct dsa_switch *ds, int port, int regnum); + int (*phy_write)(struct dsa_switch *ds, int port, + int regnum, u16 val); + + /* + * Link state polling and IRQ handling. + */ + void (*poll_link)(struct dsa_switch *ds); + + /* + * ethtool hardware statistics. + */ + void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); + void (*get_ethtool_stats)(struct dsa_switch *ds, + int port, uint64_t *data); + int (*get_sset_count)(struct dsa_switch *ds); +}; + +void register_switch_driver(struct dsa_switch_driver *type); +void unregister_switch_driver(struct dsa_switch_driver *type); + +static inline void *ds_to_priv(struct dsa_switch *ds) +{ + return (void *)(ds + 1); +} + +/* + * The original DSA tag format and some other tag formats have no + * ethertype, which means that we need to add a little hack to the + * networking receive path to make sure that received frames get + * the right ->protocol assigned to them when one of those tag + * formats is in use. 
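A minimal sketch of how a switch driver might hook into the interface this header adds. The chip name, probe register and ID value below are invented; only the dsa_switch_driver fields, ds_to_priv() and register_switch_driver() are taken from the header as added here:

/* Hypothetical switch driver against the new net/dsa.h (illustrative only). */
#include <linux/module.h>
#include <linux/phy.h>
#include <net/dsa.h>

struct example_priv {
	int ports_enabled;		/* per-chip state, allocated via priv_size */
};

static char *example_probe(struct mii_bus *bus, int sw_addr)
{
	/* Identify the chip; register 3 and the expected ID are made up. */
	return mdiobus_read(bus, sw_addr, 3) == 0x1234 ? "example-switch" : NULL;
}

static int example_setup(struct dsa_switch *ds)
{
	struct example_priv *priv = ds_to_priv(ds);	/* memory right after struct dsa_switch */

	priv->ports_enabled = 0;
	return 0;
}

static int example_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;			/* program the switch MAC address here */
}

static int example_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	return mdiobus_read(ds->master_mii_bus, port, regnum);
}

static int example_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	return mdiobus_write(ds->master_mii_bus, port, regnum, val);
}

static struct dsa_switch_driver example_switch_driver = {
	.tag_protocol	= htons(ETH_P_TRAILER),	/* frames carry a trailer tag */
	.priv_size	= sizeof(struct example_priv),
	.probe		= example_probe,
	.setup		= example_setup,
	.set_addr	= example_set_addr,
	.phy_read	= example_phy_read,
	.phy_write	= example_phy_write,
};

static int __init example_dsa_init(void)
{
	register_switch_driver(&example_switch_driver);
	return 0;
}
module_init(example_dsa_init);

The DSA core walks the dsa_chip_data entries described by dsa_platform_data, calls probe()/setup() for each chip, and routes slave-port PHY access through phy_read()/phy_write().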
+ */ +static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst) +{ + return !!(dst->tag_protocol == htons(ETH_P_DSA)); +} + +static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst) +{ + return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); +} + +#endif diff --git a/include/net/dsfield.h b/include/net/dsfield.h index 8a8d4e06900..e1ad903a8d6 100644 --- a/include/net/dsfield.h +++ b/include/net/dsfield.h @@ -43,11 +43,9 @@ static inline void ipv4_change_dsfield(struct iphdr *iph,__u8 mask, static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask, __u8 value) { - __u16 tmp; + __be16 *p = (__force __be16 *)ipv6h; - tmp = ntohs(*(__be16 *) ipv6h); - tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4); - *(__be16 *) ipv6h = htons(tmp); + *p = (*p & htons((((u16)mask << 4) | 0xf00f))) | htons((u16)value << 4); } diff --git a/include/net/dst.h b/include/net/dst.h index ae13370e848..71c60f42be4 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -8,20 +8,15 @@ #ifndef _NET_DST_H #define _NET_DST_H +#include <net/dst_ops.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/rcupdate.h> +#include <linux/bug.h> #include <linux/jiffies.h> #include <net/neighbour.h> #include <asm/processor.h> -/* - * 0 - no debugging messages - * 1 - rare events and bugs (default) - * 2 - trace mode. - */ -#define RT_CACHE_DEBUG 0 - #define DST_GC_MIN (HZ/10) #define DST_GC_INC (HZ/2) #define DST_GC_MAX (120*HZ) @@ -35,44 +30,67 @@ struct sk_buff; -struct dst_entry -{ +struct dst_entry { struct rcu_head rcu_head; struct dst_entry *child; struct net_device *dev; - short error; - short obsolete; - int flags; -#define DST_HOST 1 -#define DST_NOXFRM 2 -#define DST_NOPOLICY 4 -#define DST_NOHASH 8 - unsigned long expires; - - unsigned short header_len; /* more space at head required */ - unsigned short trailer_len; /* space to reserve at tail */ - - unsigned int rate_tokens; - unsigned long rate_last; /* rate limiting for ICMP */ - + struct dst_ops *ops; + unsigned long _metrics; + unsigned long expires; struct dst_entry *path; - - struct neighbour *neighbour; - struct hh_cache *hh; + struct dst_entry *from; +#ifdef CONFIG_XFRM struct xfrm_state *xfrm; +#else + void *__pad1; +#endif + int (*input)(struct sk_buff *); + int (*output)(struct sock *sk, struct sk_buff *skb); + + unsigned short flags; +#define DST_HOST 0x0001 +#define DST_NOXFRM 0x0002 +#define DST_NOPOLICY 0x0004 +#define DST_NOHASH 0x0008 +#define DST_NOCACHE 0x0010 +#define DST_NOCOUNT 0x0020 +#define DST_FAKE_RTABLE 0x0040 +#define DST_XFRM_TUNNEL 0x0080 +#define DST_XFRM_QUEUE 0x0100 + + unsigned short pending_confirm; - int (*input)(struct sk_buff*); - int (*output)(struct sk_buff*); - - struct dst_ops *ops; - - u32 metrics[RTAX_MAX]; + short error; -#ifdef CONFIG_NET_CLS_ROUTE + /* A non-zero value of dst->obsolete forces by-hand validation + * of the route entry. Positive values are set by the generic + * dst layer to indicate that the entry has been forcefully + * destroyed. + * + * Negative values are used by the implementation layer code to + * force invocation of the dst_ops->check() method. 
+ */ + short obsolete; +#define DST_OBSOLETE_NONE 0 +#define DST_OBSOLETE_DEAD 2 +#define DST_OBSOLETE_FORCE_CHK -1 +#define DST_OBSOLETE_KILL -2 + unsigned short header_len; /* more space at head required */ + unsigned short trailer_len; /* space to reserve at tail */ +#ifdef CONFIG_IP_ROUTE_CLASSID __u32 tclassid; +#else + __u32 __pad2; #endif /* + * Align __refcnt to a 64 bytes alignment + * (L1_CACHE_SIZE would be too much) + */ +#ifdef CONFIG_64BIT + long __pad_to_align_refcnt[2]; +#endif + /* * __refcnt wants to be on a different cache line from * input/output/ops or performance tanks badly */ @@ -80,71 +98,153 @@ struct dst_entry int __use; unsigned long lastuse; union { - struct dst_entry *next; - struct rtable *rt_next; - struct rt6_info *rt6_next; - struct dn_route *dn_next; + struct dst_entry *next; + struct rtable __rcu *rt_next; + struct rt6_info *rt6_next; + struct dn_route __rcu *dn_next; }; }; +u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); +extern const u32 dst_default_metrics[]; + +#define DST_METRICS_READ_ONLY 0x1UL +#define DST_METRICS_FORCE_OVERWRITE 0x2UL +#define DST_METRICS_FLAGS 0x3UL +#define __DST_METRICS_PTR(Y) \ + ((u32 *)((Y) & ~DST_METRICS_FLAGS)) +#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics) + +static inline bool dst_metrics_read_only(const struct dst_entry *dst) +{ + return dst->_metrics & DST_METRICS_READ_ONLY; +} -struct dst_ops +static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst) { - unsigned short family; - __be16 protocol; - unsigned gc_thresh; + dst->_metrics |= DST_METRICS_FORCE_OVERWRITE; +} - int (*gc)(struct dst_ops *ops); - struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); - void (*destroy)(struct dst_entry *); - void (*ifdown)(struct dst_entry *, - struct net_device *dev, int how); - struct dst_entry * (*negative_advice)(struct dst_entry *); - void (*link_failure)(struct sk_buff *); - void (*update_pmtu)(struct dst_entry *dst, u32 mtu); - int (*local_out)(struct sk_buff *skb); - int entry_size; +void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); - atomic_t entries; - struct kmem_cache *kmem_cachep; - struct net *dst_net; -}; +static inline void dst_destroy_metrics_generic(struct dst_entry *dst) +{ + unsigned long val = dst->_metrics; + if (!(val & DST_METRICS_READ_ONLY)) + __dst_destroy_metrics_generic(dst, val); +} + +static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst) +{ + unsigned long p = dst->_metrics; + + BUG_ON(!p); + + if (p & DST_METRICS_READ_ONLY) + return dst->ops->cow_metrics(dst, p); + return __DST_METRICS_PTR(p); +} + +/* This may only be invoked before the entry has reached global + * visibility. + */ +static inline void dst_init_metrics(struct dst_entry *dst, + const u32 *src_metrics, + bool read_only) +{ + dst->_metrics = ((unsigned long) src_metrics) | + (read_only ? 
DST_METRICS_READ_ONLY : 0); +} + +static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) +{ + u32 *dst_metrics = dst_metrics_write_ptr(dest); + + if (dst_metrics) { + u32 *src_metrics = DST_METRICS_PTR(src); + + memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); + } +} + +static inline u32 *dst_metrics_ptr(struct dst_entry *dst) +{ + return DST_METRICS_PTR(dst); +} + +static inline u32 +dst_metric_raw(const struct dst_entry *dst, const int metric) +{ + u32 *p = DST_METRICS_PTR(dst); + + return p[metric-1]; +} + +static inline u32 +dst_metric(const struct dst_entry *dst, const int metric) +{ + WARN_ON_ONCE(metric == RTAX_HOPLIMIT || + metric == RTAX_ADVMSS || + metric == RTAX_MTU); + return dst_metric_raw(dst, metric); +} + +static inline u32 +dst_metric_advmss(const struct dst_entry *dst) +{ + u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); + + if (!advmss) + advmss = dst->ops->default_advmss(dst); + + return advmss; +} -#ifdef __KERNEL__ +static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) +{ + u32 *p = dst_metrics_write_ptr(dst); + + if (p) + p[metric-1] = val; +} static inline u32 -dst_metric(const struct dst_entry *dst, int metric) +dst_feature(const struct dst_entry *dst, u32 feature) { - return dst->metrics[metric-1]; + return dst_metric(dst, RTAX_FEATURES) & feature; } static inline u32 dst_mtu(const struct dst_entry *dst) { - u32 mtu = dst_metric(dst, RTAX_MTU); - /* - * Alexey put it here, so ask him about it :) - */ - barrier(); - return mtu; + return dst->ops->mtu(dst); +} + +/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ +static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric) +{ + return msecs_to_jiffies(dst_metric(dst, metric)); } static inline u32 dst_allfrag(const struct dst_entry *dst) { - int ret = dst_metric(dst, RTAX_FEATURES) & RTAX_FEATURE_ALLFRAG; - /* Yes, _exactly_. This is paranoia. */ - barrier(); + int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); return ret; } static inline int -dst_metric_locked(struct dst_entry *dst, int metric) +dst_metric_locked(const struct dst_entry *dst, int metric) { return dst_metric(dst, RTAX_LOCK) & (1<<metric); } -static inline void dst_hold(struct dst_entry * dst) +static inline void dst_hold(struct dst_entry *dst) { + /* + * If your kernel compilation stops here, please check + * __pad_to_align_refcnt declaration in struct dst_entry + */ + BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63); atomic_inc(&dst->__refcnt); } @@ -155,44 +255,131 @@ static inline void dst_use(struct dst_entry *dst, unsigned long time) dst->lastuse = time; } -static inline -struct dst_entry * dst_clone(struct dst_entry * dst) +static inline void dst_use_noref(struct dst_entry *dst, unsigned long time) +{ + dst->__use++; + dst->lastuse = time; +} + +static inline struct dst_entry *dst_clone(struct dst_entry *dst) { if (dst) atomic_inc(&dst->__refcnt); return dst; } -static inline -void dst_release(struct dst_entry * dst) +void dst_release(struct dst_entry *dst); + +static inline void refdst_drop(unsigned long refdst) +{ + if (!(refdst & SKB_DST_NOREF)) + dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK)); +} + +/** + * skb_dst_drop - drops skb dst + * @skb: buffer + * + * Drops dst reference count if a reference was taken. 
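The _metrics word introduced above replaces the old per-entry metrics[RTAX_MAX] array with a tagged pointer: bit 0 (DST_METRICS_READ_ONLY) marks a shared, read-only array that has to be copied through dst_ops->cow_metrics() before the first write. A freestanding illustration of that tagging trick, using stand-in names rather than the kernel structures:

/* Standalone illustration of the _metrics tagged-pointer scheme (not kernel code). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define METRICS_READ_ONLY	0x1UL
#define METRICS_PTR(v)		((uint32_t *)((v) & ~METRICS_READ_ONLY))

static const uint32_t default_metrics[4];	/* shared all-zero defaults */
static uint32_t private_copy[4];		/* per-entry writable storage */

int main(void)
{
	/* Start out pointing at the shared defaults, marked read-only. */
	unsigned long metrics = (unsigned long)default_metrics | METRICS_READ_ONLY;

	/* Reads only need to strip the tag bit. */
	printf("metric[0] before: %u\n", (unsigned)METRICS_PTR(metrics)[0]);

	/* A write must first copy ("cow") the array into private storage. */
	if (metrics & METRICS_READ_ONLY) {
		memcpy(private_copy, METRICS_PTR(metrics), sizeof(private_copy));
		metrics = (unsigned long)private_copy;	/* tag bit now clear */
	}
	METRICS_PTR(metrics)[0] = 1500;

	printf("metric[0] after:  %u\n", (unsigned)METRICS_PTR(metrics)[0]);
	return 0;
}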
+ */ +static inline void skb_dst_drop(struct sk_buff *skb) { - if (dst) { - WARN_ON(atomic_read(&dst->__refcnt) < 1); - smp_mb__before_atomic_dec(); - atomic_dec(&dst->__refcnt); + if (skb->_skb_refdst) { + refdst_drop(skb->_skb_refdst); + skb->_skb_refdst = 0UL; } } +static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb) +{ + nskb->_skb_refdst = oskb->_skb_refdst; + if (!(nskb->_skb_refdst & SKB_DST_NOREF)) + dst_clone(skb_dst(nskb)); +} + +/** + * skb_dst_force - makes sure skb dst is refcounted + * @skb: buffer + * + * If dst is not yet refcounted, let's do it + */ +static inline void skb_dst_force(struct sk_buff *skb) +{ + if (skb_dst_is_noref(skb)) { + WARN_ON(!rcu_read_lock_held()); + skb->_skb_refdst &= ~SKB_DST_NOREF; + dst_clone(skb_dst(skb)); + } +} + + +/** + * __skb_tunnel_rx - prepare skb for rx reinsert + * @skb: buffer + * @dev: tunnel device + * @net: netns for packet i/o + * + * After decapsulation, packet is going to re-enter (netif_rx()) our stack, + * so make some cleanups. (no accounting done) + */ +static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, + struct net *net) +{ + skb->dev = dev; + + /* + * Clear hash so that we can recalulate the hash for the + * encapsulated packet, unless we have already determine the hash + * over the L4 4-tuple. + */ + skb_clear_hash_if_not_l4(skb); + skb_set_queue_mapping(skb, 0); + skb_scrub_packet(skb, !net_eq(net, dev_net(dev))); +} + +/** + * skb_tunnel_rx - prepare skb for rx reinsert + * @skb: buffer + * @dev: tunnel device + * + * After decapsulation, packet is going to re-enter (netif_rx()) our stack, + * so make some cleanups, and perform accounting. + * Note: this accounting is not SMP safe. + */ +static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, + struct net *net) +{ + /* TODO : stats should be SMP safe */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + __skb_tunnel_rx(skb, dev, net); +} + /* Children define the path of the packet through the * Linux networking. Thus, destinations are stackable. 
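skb_tunnel_rx() above collects the cleanups (and rx accounting) a tunnel performs before re-injecting a decapsulated packet into the stack. A hedged sketch of the kind of call site it is written for, with the outer-header handling simplified and no particular tunnel protocol in mind:

/* Hypothetical tail of a tunnel receive handler (illustrative only). */
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/dst.h>

static int example_tunnel_reinject(struct sk_buff *skb, struct net_device *tunnel_dev)
{
	/* The caller has already validated the outer IPv4 header; drop it. */
	if (!pskb_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);

	/* Reset skb state, charge tunnel_dev's rx stats, re-enter the stack. */
	skb_tunnel_rx(skb, tunnel_dev, dev_net(tunnel_dev));
	return netif_rx(skb);
}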
*/ -static inline struct dst_entry *dst_pop(struct dst_entry *dst) +static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb) { - struct dst_entry *child = dst_clone(dst->child); + struct dst_entry *child = dst_clone(skb_dst(skb)->child); - dst_release(dst); + skb_dst_drop(skb); return child; } -extern int dst_discard(struct sk_buff *skb); -extern void * dst_alloc(struct dst_ops * ops); -extern void __dst_free(struct dst_entry * dst); -extern struct dst_entry *dst_destroy(struct dst_entry * dst); +int dst_discard_sk(struct sock *sk, struct sk_buff *skb); +static inline int dst_discard(struct sk_buff *skb) +{ + return dst_discard_sk(skb->sk, skb); +} +void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, + int initial_obsolete, unsigned short flags); +void __dst_free(struct dst_entry *dst); +struct dst_entry *dst_destroy(struct dst_entry *dst); -static inline void dst_free(struct dst_entry * dst) +static inline void dst_free(struct dst_entry *dst) { - if (dst->obsolete > 1) + if (dst->obsolete > 0) return; if (!atomic_read(&dst->__refcnt)) { dst = dst_destroy(dst); @@ -210,20 +397,46 @@ static inline void dst_rcu_free(struct rcu_head *head) static inline void dst_confirm(struct dst_entry *dst) { - if (dst) - neigh_confirm(dst->neighbour); + dst->pending_confirm = 1; +} + +static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) +{ + const struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; + + dst->pending_confirm = 0; + /* avoid dirtying neighbour */ + if (n->confirmed != now) + n->confirmed = now; + } + + hh = &n->hh; + if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) + return neigh_hh_output(hh, skb); + else + return n->output(n, skb); } -static inline void dst_negative_advice(struct dst_entry **dst_p) +static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) { - struct dst_entry * dst = *dst_p; - if (dst && dst->ops->negative_advice) - *dst_p = dst->ops->negative_advice(dst); + struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); + return IS_ERR(n) ? NULL : n; +} + +static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, + struct sk_buff *skb) +{ + struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? NULL : n; } static inline void dst_link_failure(struct sk_buff *skb) { - struct dst_entry * dst = skb->dst; + struct dst_entry *dst = skb_dst(skb); if (dst && dst->ops && dst->ops->link_failure) dst->ops->link_failure(skb); } @@ -240,25 +453,19 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout) } /* Output packet to network from transport. */ +static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb) +{ + return skb_dst(skb)->output(sk, skb); +} static inline int dst_output(struct sk_buff *skb) { - return skb->dst->output(skb); + return dst_output_sk(skb->sk, skb); } /* Input packet from network to transport. */ static inline int dst_input(struct sk_buff *skb) { - int err; - - for (;;) { - err = skb->dst->input(skb); - - if (likely(err == 0)) - return err; - /* Oh, Jamal... Seems, I will not forgive you this mess. 
:-) */ - if (unlikely(err != NET_XMIT_BYPASS)) - return err; - } + return skb_dst(skb)->input(skb); } static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) @@ -268,32 +475,38 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) return dst; } -extern void dst_init(void); +void dst_init(void); /* Flags for xfrm_lookup flags argument. */ enum { - XFRM_LOOKUP_WAIT = 1 << 0, - XFRM_LOOKUP_ICMP = 1 << 1, + XFRM_LOOKUP_ICMP = 1 << 0, }; struct flowi; #ifndef CONFIG_XFRM -static inline int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, - struct sock *sk, int flags) +static inline struct dst_entry *xfrm_lookup(struct net *net, + struct dst_entry *dst_orig, + const struct flowi *fl, struct sock *sk, + int flags) { - return 0; + return dst_orig; } -static inline int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, - struct sock *sk, int flags) + +static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) { - return 0; + return NULL; } + #else -extern int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, - struct sock *sk, int flags); -extern int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, - struct sock *sk, int flags); -#endif +struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, + const struct flowi *fl, struct sock *sk, + int flags); + +/* skb attached with this dst needs transformation if dst->xfrm is valid */ +static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) +{ + return dst->xfrm; +} #endif #endif /* _NET_DST_H */ diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h new file mode 100644 index 00000000000..2f26dfb8450 --- /dev/null +++ b/include/net/dst_ops.h @@ -0,0 +1,74 @@ +#ifndef _NET_DST_OPS_H +#define _NET_DST_OPS_H +#include <linux/types.h> +#include <linux/percpu_counter.h> +#include <linux/cache.h> + +struct dst_entry; +struct kmem_cachep; +struct net_device; +struct sk_buff; +struct sock; + +struct dst_ops { + unsigned short family; + __be16 protocol; + unsigned int gc_thresh; + + int (*gc)(struct dst_ops *ops); + struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, unsigned long); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, + struct net_device *dev, int how); + struct dst_entry * (*negative_advice)(struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, u32 mtu); + void (*redirect)(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb); + int (*local_out)(struct sk_buff *skb); + struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, + struct sk_buff *skb, + const void *daddr); + + struct kmem_cache *kmem_cachep; + + struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp; +}; + +static inline int dst_entries_get_fast(struct dst_ops *dst) +{ + return percpu_counter_read_positive(&dst->pcpuc_entries); +} + +static inline int dst_entries_get_slow(struct dst_ops *dst) +{ + int res; + + local_bh_disable(); + res = percpu_counter_sum_positive(&dst->pcpuc_entries); + local_bh_enable(); + return res; +} + +static inline void dst_entries_add(struct dst_ops *dst, int val) +{ + local_bh_disable(); + percpu_counter_add(&dst->pcpuc_entries, val); + local_bh_enable(); +} + +static inline int dst_entries_init(struct dst_ops *dst) +{ + return 
percpu_counter_init(&dst->pcpuc_entries, 0); +} + +static inline void dst_entries_destroy(struct dst_ops *dst) +{ + percpu_counter_destroy(&dst->pcpuc_entries); +} + +#endif diff --git a/include/net/esp.h b/include/net/esp.h index d58451331db..a43be85aedc 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -3,18 +3,6 @@ #include <linux/skbuff.h> -struct crypto_aead; - -struct esp_data { - /* 0..255 */ - int padlen; - - /* Confidentiality & Integrity */ - struct crypto_aead *aead; -}; - -extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); - struct ip_esp_hdr; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) diff --git a/include/net/ethoc.h b/include/net/ethoc.h new file mode 100644 index 00000000000..2a2d6bb34eb --- /dev/null +++ b/include/net/ethoc.h @@ -0,0 +1,23 @@ +/* + * linux/include/net/ethoc.h + * + * Copyright (C) 2008-2009 Avionic Design GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Written by Thierry Reding <thierry.reding@avionic-design.de> + */ + +#ifndef LINUX_NET_ETHOC_H +#define LINUX_NET_ETHOC_H 1 + +struct ethoc_platform_data { + u8 hwaddr[IFHWADDRLEN]; + s8 phy_id; + u32 eth_clkfreq; +}; + +#endif /* !LINUX_NET_ETHOC_H */ + diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 34349f9f433..e584de16e4c 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -2,38 +2,44 @@ #define __NET_FIB_RULES_H #include <linux/types.h> +#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/fib_rules.h> #include <net/flow.h> #include <net/rtnetlink.h> -struct fib_rule -{ +struct fib_rule { struct list_head list; - atomic_t refcnt; - int ifindex; - char ifname[IFNAMSIZ]; + int iifindex; + int oifindex; u32 mark; u32 mark_mask; - u32 pref; u32 flags; u32 table; u8 action; + /* 3 bytes hole, try to use */ u32 target; - struct fib_rule * ctarget; + struct fib_rule __rcu *ctarget; + struct net *fr_net; + + atomic_t refcnt; + u32 pref; + int suppress_ifgroup; + int suppress_prefixlen; + char iifname[IFNAMSIZ]; + char oifname[IFNAMSIZ]; struct rcu_head rcu; - struct net * fr_net; }; -struct fib_lookup_arg -{ +struct fib_lookup_arg { void *lookup_ptr; void *result; struct fib_rule *rule; + int flags; +#define FIB_LOOKUP_NOREF 1 }; -struct fib_rules_ops -{ +struct fib_rules_ops { int family; struct list_head list; int rule_size; @@ -44,39 +50,44 @@ struct fib_rules_ops int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); + bool (*suppress)(struct fib_rule *, + struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, - struct nlmsghdr *, struct fib_rule_hdr *, struct nlattr **); + void (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, - struct nlmsghdr *, struct fib_rule_hdr *); u32 (*default_pref)(struct fib_rules_ops *ops); size_t (*nlmsg_payload)(struct fib_rule *); /* Called after modifications to the rules set, must flush * the route cache if one exists. 
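struct fib_rules_ops is the per-address-family glue behind policy routing rules. The fragment below sketches, with invented names and with the configure/compare/fill callbacks left out, how a family might fill in the reworked ops and register them against a namespace via the fib_rules_register(), fib_default_rule_add() and fib_default_rule_pref() declarations that appear later in this hunk:

/* Invented address-family user of the reworked fib_rules_ops (sketch only). */
#include <linux/err.h>
#include <linux/module.h>
#include <net/fib_rules.h>

struct example_rule {
	struct fib_rule common;		/* must be first, see rule_size below */
	u8 tos;				/* family-specific selector */
};

static int example_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	struct example_rule *r = (struct example_rule *)rule;

	return r->tos == 0 || r->tos == fl->u.ip4.flowi4_tos;
}

static int example_rule_action(struct fib_rule *rule, struct flowi *fl,
			       int flags, struct fib_lookup_arg *arg)
{
	arg->rule = rule;		/* pretend every matching rule resolves */
	return 0;
}

static const struct nla_policy example_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static const struct fib_rules_ops example_rules_ops_template = {
	.family		= AF_INET,
	.rule_size	= sizeof(struct example_rule),
	.match		= example_rule_match,
	.action		= example_rule_action,
	.default_pref	= fib_default_rule_pref,
	.policy		= example_rule_policy,
	.owner		= THIS_MODULE,
};

static int example_rules_net_init(struct net *net)
{
	struct fib_rules_ops *ops;

	ops = fib_rules_register(&example_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* One catch-all rule pointing at the main table. */
	return fib_default_rule_add(ops, 0x7FFF, RT_TABLE_MAIN, 0);
}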
*/ - void (*flush_cache)(void); + void (*flush_cache)(struct fib_rules_ops *ops); int nlgroup; const struct nla_policy *policy; struct list_head rules_list; struct module *owner; struct net *fro_net; + struct rcu_head rcu; }; #define FRA_GENERIC_POLICY \ - [FRA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ + [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ + [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ [FRA_PRIORITY] = { .type = NLA_U32 }, \ [FRA_FWMARK] = { .type = NLA_U32 }, \ [FRA_FWMASK] = { .type = NLA_U32 }, \ [FRA_TABLE] = { .type = NLA_U32 }, \ + [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \ + [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \ [FRA_GOTO] = { .type = NLA_U32 } static inline void fib_rule_get(struct fib_rule *rule) @@ -87,6 +98,7 @@ static inline void fib_rule_get(struct fib_rule *rule) static inline void fib_rule_put_rcu(struct rcu_head *head) { struct fib_rule *rule = container_of(head, struct fib_rule, rcu); + release_net(rule->fr_net); kfree(rule); } @@ -103,14 +115,13 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) return frh->table; } -extern int fib_rules_register(struct fib_rules_ops *); -extern void fib_rules_unregister(struct fib_rules_ops *); -extern void fib_rules_cleanup_ops(struct fib_rules_ops *); +struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, + struct net *); +void fib_rules_unregister(struct fib_rules_ops *); -extern int fib_rules_lookup(struct fib_rules_ops *, - struct flowi *, int flags, - struct fib_lookup_arg *); -extern int fib_default_rule_add(struct fib_rules_ops *, - u32 pref, u32 table, - u32 flags); +int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, + struct fib_lookup_arg *); +int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, + u32 flags); +u32 fib_default_rule_pref(struct fib_rules_ops *ops); #endif diff --git a/include/net/firewire.h b/include/net/firewire.h new file mode 100644 index 00000000000..31bcbfe7a22 --- /dev/null +++ b/include/net/firewire.h @@ -0,0 +1,25 @@ +#ifndef _NET_FIREWIRE_H +#define _NET_FIREWIRE_H + +/* Pseudo L2 address */ +#define FWNET_ALEN 16 +union fwnet_hwaddr { + u8 u[FWNET_ALEN]; + /* "Hardware address" defined in RFC2734/RF3146 */ + struct { + __be64 uniq_id; /* EUI-64 */ + u8 max_rec; /* max packet size */ + u8 sspd; /* max speed */ + __be16 fifo_hi; /* hi 16bits of FIFO addr */ + __be32 fifo_lo; /* lo 32bits of FIFO addr */ + } __packed uc; +}; + +/* Pseudo L2 Header */ +#define FWNET_HLEN 18 +struct fwnet_header { + u8 h_dest[FWNET_ALEN]; /* destination address */ + __be16 h_proto; /* packet type ID field */ +} __packed; + +#endif diff --git a/include/net/flow.h b/include/net/flow.h index ad16e0076c8..8109a159d1b 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -7,95 +7,230 @@ #ifndef _NET_FLOW_H #define _NET_FLOW_H +#include <linux/socket.h> #include <linux/in6.h> -#include <asm/atomic.h> +#include <linux/atomic.h> -struct flowi { - int oif; - int iif; - __u32 mark; +/* + * ifindex generation is per-net namespace, and loopback is + * always the 1st device in ns (see net_dev_init), thus any + * loopback device should get ifindex 1 + */ +#define LOOPBACK_IFINDEX 1 + +struct flowi_common { + int flowic_oif; + int flowic_iif; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; +#define FLOWI_FLAG_ANYSRC 0x01 +#define FLOWI_FLAG_KNOWN_NH 0x02 + __u32 flowic_secid; +}; + +union flowi_uli { + struct { + 
__be16 dport; + __be16 sport; + } ports; + + struct { + __u8 type; + __u8 code; + } icmpt; + + struct { + __le16 dport; + __le16 sport; + } dnports; + + __be32 spi; + __be32 gre_key; + + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; +#define flowi4_oif __fl_common.flowic_oif +#define flowi4_iif __fl_common.flowic_iif +#define flowi4_mark __fl_common.flowic_mark +#define flowi4_tos __fl_common.flowic_tos +#define flowi4_scope __fl_common.flowic_scope +#define flowi4_proto __fl_common.flowic_proto +#define flowi4_flags __fl_common.flowic_flags +#define flowi4_secid __fl_common.flowic_secid + + /* (saddr,daddr) must be grouped, same order as in IP header */ + __be32 saddr; + __be32 daddr; + + union flowi_uli uli; +#define fl4_sport uli.ports.sport +#define fl4_dport uli.ports.dport +#define fl4_icmp_type uli.icmpt.type +#define fl4_icmp_code uli.icmpt.code +#define fl4_ipsec_spi uli.spi +#define fl4_mh_type uli.mht.type +#define fl4_gre_key uli.gre_key +} __attribute__((__aligned__(BITS_PER_LONG/8))); + +static inline void flowi4_init_output(struct flowi4 *fl4, int oif, + __u32 mark, __u8 tos, __u8 scope, + __u8 proto, __u8 flags, + __be32 daddr, __be32 saddr, + __be16 dport, __be16 sport) +{ + fl4->flowi4_oif = oif; + fl4->flowi4_iif = LOOPBACK_IFINDEX; + fl4->flowi4_mark = mark; + fl4->flowi4_tos = tos; + fl4->flowi4_scope = scope; + fl4->flowi4_proto = proto; + fl4->flowi4_flags = flags; + fl4->flowi4_secid = 0; + fl4->daddr = daddr; + fl4->saddr = saddr; + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; +} + +/* Reset some input parameters after previous lookup */ +static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos, + __be32 daddr, __be32 saddr) +{ + fl4->flowi4_oif = oif; + fl4->flowi4_tos = tos; + fl4->daddr = daddr; + fl4->saddr = saddr; +} + + +struct flowi6 { + struct flowi_common __fl_common; +#define flowi6_oif __fl_common.flowic_oif +#define flowi6_iif __fl_common.flowic_iif +#define flowi6_mark __fl_common.flowic_mark +#define flowi6_tos __fl_common.flowic_tos +#define flowi6_scope __fl_common.flowic_scope +#define flowi6_proto __fl_common.flowic_proto +#define flowi6_flags __fl_common.flowic_flags +#define flowi6_secid __fl_common.flowic_secid + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; +#define fl6_sport uli.ports.sport +#define fl6_dport uli.ports.dport +#define fl6_icmp_type uli.icmpt.type +#define fl6_icmp_code uli.icmpt.code +#define fl6_ipsec_spi uli.spi +#define fl6_mh_type uli.mht.type +#define fl6_gre_key uli.gre_key +} __attribute__((__aligned__(BITS_PER_LONG/8))); + +struct flowidn { + struct flowi_common __fl_common; +#define flowidn_oif __fl_common.flowic_oif +#define flowidn_iif __fl_common.flowic_iif +#define flowidn_mark __fl_common.flowic_mark +#define flowidn_scope __fl_common.flowic_scope +#define flowidn_proto __fl_common.flowic_proto +#define flowidn_flags __fl_common.flowic_flags + __le16 daddr; + __le16 saddr; + union flowi_uli uli; +#define fld_sport uli.ports.sport +#define fld_dport uli.ports.dport +} __attribute__((__aligned__(BITS_PER_LONG/8))); + +struct flowi { union { - struct { - __be32 daddr; - __be32 saddr; - __u8 tos; - __u8 scope; - } ip4_u; - - struct { - struct in6_addr daddr; - struct in6_addr saddr; - __be32 flowlabel; - } ip6_u; - - struct { - __le16 daddr; - __le16 saddr; - __u8 scope; - } dn_u; - } nl_u; -#define fld_dst nl_u.dn_u.daddr -#define fld_src nl_u.dn_u.saddr -#define fld_scope nl_u.dn_u.scope -#define 
fl6_dst nl_u.ip6_u.daddr -#define fl6_src nl_u.ip6_u.saddr -#define fl6_flowlabel nl_u.ip6_u.flowlabel -#define fl4_dst nl_u.ip4_u.daddr -#define fl4_src nl_u.ip4_u.saddr -#define fl4_tos nl_u.ip4_u.tos -#define fl4_scope nl_u.ip4_u.scope - - __u8 proto; - __u8 flags; - union { - struct { - __be16 sport; - __be16 dport; - } ports; - - struct { - __u8 type; - __u8 code; - } icmpt; - - struct { - __le16 sport; - __le16 dport; - } dnports; - - __be32 spi; - - struct { - __u8 type; - } mht; - } uli_u; -#define fl_ip_sport uli_u.ports.sport -#define fl_ip_dport uli_u.ports.dport -#define fl_icmp_type uli_u.icmpt.type -#define fl_icmp_code uli_u.icmpt.code -#define fl_ipsec_spi uli_u.spi -#define fl_mh_type uli_u.mht.type - __u32 secid; /* used by xfrm; see secid.txt */ + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + struct flowidn dn; + } u; +#define flowi_oif u.__fl_common.flowic_oif +#define flowi_iif u.__fl_common.flowic_iif +#define flowi_mark u.__fl_common.flowic_mark +#define flowi_tos u.__fl_common.flowic_tos +#define flowi_scope u.__fl_common.flowic_scope +#define flowi_proto u.__fl_common.flowic_proto +#define flowi_flags u.__fl_common.flowic_flags +#define flowi_secid u.__fl_common.flowic_secid } __attribute__((__aligned__(BITS_PER_LONG/8))); +static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) +{ + return container_of(fl4, struct flowi, u.ip4); +} + +static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6) +{ + return container_of(fl6, struct flowi, u.ip6); +} + +static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) +{ + return container_of(fldn, struct flowi, u.dn); +} + +typedef unsigned long flow_compare_t; + +static inline size_t flow_key_size(u16 family) +{ + switch (family) { + case AF_INET: + BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); + return sizeof(struct flowi4) / sizeof(flow_compare_t); + case AF_INET6: + BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); + return sizeof(struct flowi6) / sizeof(flow_compare_t); + case AF_DECnet: + BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); + return sizeof(struct flowidn) / sizeof(flow_compare_t); + } + return 0; +} + #define FLOW_DIR_IN 0 #define FLOW_DIR_OUT 1 #define FLOW_DIR_FWD 2 +struct net; struct sock; -typedef int (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir, - void **objp, atomic_t **obj_refp); +struct flow_cache_ops; -extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir, - flow_resolve_t resolver); -extern void flow_cache_flush(void); -extern atomic_t flow_cache_genid; +struct flow_cache_object { + const struct flow_cache_ops *ops; +}; -static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2) -{ - return (fl1->proto == fl2->proto && - !memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u))); -} +struct flow_cache_ops { + struct flow_cache_object *(*get)(struct flow_cache_object *); + int (*check)(struct flow_cache_object *); + void (*delete)(struct flow_cache_object *); +}; + +typedef struct flow_cache_object *(*flow_resolve_t)( + struct net *net, const struct flowi *key, u16 family, + u8 dir, struct flow_cache_object *oldobj, void *ctx); + +struct flow_cache_object *flow_cache_lookup(struct net *net, + const struct flowi *key, u16 family, + u8 dir, flow_resolve_t resolver, + void *ctx); +int flow_cache_init(struct net *net); +void flow_cache_fini(struct net *net); + +void flow_cache_flush(struct net *net); +void flow_cache_flush_deferred(struct net *net); +extern atomic_t 
flow_cache_genid; #endif diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h new file mode 100644 index 00000000000..7e64bd8bbda --- /dev/null +++ b/include/net/flow_keys.h @@ -0,0 +1,18 @@ +#ifndef _NET_FLOW_KEYS_H +#define _NET_FLOW_KEYS_H + +struct flow_keys { + /* (src,dst) must be grouped, in the same way than in IP header */ + __be32 src; + __be32 dst; + union { + __be32 ports; + __be16 port16[2]; + }; + u16 thoff; + u8 ip_proto; +}; + +bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); +__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto); +#endif diff --git a/include/net/flowcache.h b/include/net/flowcache.h new file mode 100644 index 00000000000..c8f665ec6e0 --- /dev/null +++ b/include/net/flowcache.h @@ -0,0 +1,25 @@ +#ifndef _NET_FLOWCACHE_H +#define _NET_FLOWCACHE_H + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/notifier.h> + +struct flow_cache_percpu { + struct hlist_head *hash_table; + int hash_count; + u32 hash_rnd; + int hash_rnd_recalc; + struct tasklet_struct flush_tasklet; +}; + +struct flow_cache { + u32 hash_shift; + struct flow_cache_percpu __percpu *percpu; + struct notifier_block hotcpu_notifier; + int low_watermark; + int high_watermark; + struct timer_list rnd_timer; +}; +#endif /* _NET_FLOWCACHE_H */ diff --git a/include/net/garp.h b/include/net/garp.h new file mode 100644 index 00000000000..abf33bbd2e6 --- /dev/null +++ b/include/net/garp.h @@ -0,0 +1,129 @@ +#ifndef _NET_GARP_H +#define _NET_GARP_H + +#include <net/stp.h> + +#define GARP_PROTOCOL_ID 0x1 +#define GARP_END_MARK 0x0 + +struct garp_pdu_hdr { + __be16 protocol; +}; + +struct garp_msg_hdr { + u8 attrtype; +}; + +enum garp_attr_event { + GARP_LEAVE_ALL, + GARP_JOIN_EMPTY, + GARP_JOIN_IN, + GARP_LEAVE_EMPTY, + GARP_LEAVE_IN, + GARP_EMPTY, +}; + +struct garp_attr_hdr { + u8 len; + u8 event; + u8 data[]; +}; + +struct garp_skb_cb { + u8 cur_type; +}; + +static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct garp_skb_cb) > + FIELD_SIZEOF(struct sk_buff, cb)); + return (struct garp_skb_cb *)skb->cb; +} + +enum garp_applicant_state { + GARP_APPLICANT_INVALID, + GARP_APPLICANT_VA, + GARP_APPLICANT_AA, + GARP_APPLICANT_QA, + GARP_APPLICANT_LA, + GARP_APPLICANT_VP, + GARP_APPLICANT_AP, + GARP_APPLICANT_QP, + GARP_APPLICANT_VO, + GARP_APPLICANT_AO, + GARP_APPLICANT_QO, + __GARP_APPLICANT_MAX +}; +#define GARP_APPLICANT_MAX (__GARP_APPLICANT_MAX - 1) + +enum garp_event { + GARP_EVENT_REQ_JOIN, + GARP_EVENT_REQ_LEAVE, + GARP_EVENT_R_JOIN_IN, + GARP_EVENT_R_JOIN_EMPTY, + GARP_EVENT_R_EMPTY, + GARP_EVENT_R_LEAVE_IN, + GARP_EVENT_R_LEAVE_EMPTY, + GARP_EVENT_TRANSMIT_PDU, + __GARP_EVENT_MAX +}; +#define GARP_EVENT_MAX (__GARP_EVENT_MAX - 1) + +enum garp_action { + GARP_ACTION_NONE, + GARP_ACTION_S_JOIN_IN, + GARP_ACTION_S_LEAVE_EMPTY, +}; + +struct garp_attr { + struct rb_node node; + enum garp_applicant_state state; + u8 type; + u8 dlen; + unsigned char data[]; +}; + +enum garp_applications { + GARP_APPLICATION_GVRP, + __GARP_APPLICATION_MAX +}; +#define GARP_APPLICATION_MAX (__GARP_APPLICATION_MAX - 1) + +struct garp_application { + enum garp_applications type; + unsigned int maxattr; + struct stp_proto proto; +}; + +struct garp_applicant { + struct garp_application *app; + struct net_device *dev; + struct timer_list join_timer; + + spinlock_t lock; + struct sk_buff_head queue; + struct sk_buff *pdu; + struct rb_root gid; + struct rcu_head rcu; +}; + +struct 
garp_port { + struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1]; + struct rcu_head rcu; +}; + +int garp_register_application(struct garp_application *app); +void garp_unregister_application(struct garp_application *app); + +int garp_init_applicant(struct net_device *dev, struct garp_application *app); +void garp_uninit_applicant(struct net_device *dev, + struct garp_application *app); + +int garp_request_join(const struct net_device *dev, + const struct garp_application *app, const void *data, + u8 len, u8 type); +void garp_request_leave(const struct net_device *dev, + const struct garp_application *app, + const void *data, u8 len, u8 type); + +#endif /* _NET_GARP_H */ diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 8cd8185fa2e..ea4271dceff 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -6,13 +6,12 @@ #include <linux/rtnetlink.h> #include <linux/pkt_sched.h> -struct gnet_dump -{ +struct gnet_dump { spinlock_t * lock; struct sk_buff * skb; struct nlattr * tail; - /* Backward compatability */ + /* Backward compatibility */ int compat_tc_stats; int compat_xstats; void * xstats; @@ -20,30 +19,31 @@ struct gnet_dump struct tc_stats tc_stats; }; -extern int gnet_stats_start_copy(struct sk_buff *skb, int type, - spinlock_t *lock, struct gnet_dump *d); +int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, + struct gnet_dump *d); -extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, - int tc_stats_type,int xstats_type, - spinlock_t *lock, struct gnet_dump *d); - -extern int gnet_stats_copy_basic(struct gnet_dump *d, - struct gnet_stats_basic *b); -extern int gnet_stats_copy_rate_est(struct gnet_dump *d, - struct gnet_stats_rate_est *r); -extern int gnet_stats_copy_queue(struct gnet_dump *d, - struct gnet_stats_queue *q); -extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); - -extern int gnet_stats_finish_copy(struct gnet_dump *d); - -extern int gen_new_estimator(struct gnet_stats_basic *bstats, - struct gnet_stats_rate_est *rate_est, - spinlock_t *stats_lock, struct nlattr *opt); -extern void gen_kill_estimator(struct gnet_stats_basic *bstats, - struct gnet_stats_rate_est *rate_est); -extern int gen_replace_estimator(struct gnet_stats_basic *bstats, - struct gnet_stats_rate_est *rate_est, - spinlock_t *stats_lock, struct nlattr *opt); +int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, + int tc_stats_type, int xstats_type, + spinlock_t *lock, struct gnet_dump *d); +int gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_packed *b); +int gnet_stats_copy_rate_est(struct gnet_dump *d, + const struct gnet_stats_basic_packed *b, + struct gnet_stats_rate_est64 *r); +int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q); +int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); + +int gnet_stats_finish_copy(struct gnet_dump *d); + +int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_rate_est64 *rate_est, + spinlock_t *stats_lock, struct nlattr *opt); +void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_rate_est64 *rate_est); +int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_rate_est64 *rate_est, + spinlock_t *stats_lock, struct nlattr *opt); +bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, + const struct gnet_stats_rate_est64 *rate_est); #endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h 
index decdda54682..93695f0e22a 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -3,23 +3,21 @@ #include <linux/genetlink.h> #include <net/netlink.h> +#include <net/net_namespace.h> + +#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN) /** * struct genl_multicast_group - generic netlink multicast group * @name: name of the multicast group, names are per-family - * @id: multicast group ID, assigned by the core, to use with - * genlmsg_multicast(). - * @list: list entry for linking - * @family: pointer to family, need not be set before registering */ -struct genl_multicast_group -{ - struct genl_family *family; /* private */ - struct list_head list; /* private */ +struct genl_multicast_group { char name[GENL_NAMSIZ]; - u32 id; }; +struct genl_ops; +struct genl_info; + /** * struct genl_family - generic netlink family * @id: protocol family idenfitier @@ -27,46 +25,84 @@ struct genl_multicast_group * @name: name of family * @version: protocol version * @maxattr: maximum number of attributes supported + * @netnsok: set to true if the family can handle network + * namespaces and should be presented in all of them + * @pre_doit: called before an operation's doit callback, it may + * do additional, common, filtering and return an error + * @post_doit: called after an operation's doit callback, it may + * undo operations done by pre_doit, for example release locks * @attrbuf: buffer to store parsed attributes - * @ops_list: list of all assigned operations * @family_list: family list - * @mcast_groups: multicast groups list + * @mcgrps: multicast groups used by this family (private) + * @n_mcgrps: number of multicast groups (private) + * @mcgrp_offset: starting number of multicast group IDs in this family + * @ops: the operations supported by this family (private) + * @n_ops: number of operations supported by this family (private) */ -struct genl_family -{ +struct genl_family { unsigned int id; unsigned int hdrsize; char name[GENL_NAMSIZ]; unsigned int version; unsigned int maxattr; + bool netnsok; + bool parallel_ops; + int (*pre_doit)(const struct genl_ops *ops, + struct sk_buff *skb, + struct genl_info *info); + void (*post_doit)(const struct genl_ops *ops, + struct sk_buff *skb, + struct genl_info *info); struct nlattr ** attrbuf; /* private */ - struct list_head ops_list; /* private */ + const struct genl_ops * ops; /* private */ + const struct genl_multicast_group *mcgrps; /* private */ + unsigned int n_ops; /* private */ + unsigned int n_mcgrps; /* private */ + unsigned int mcgrp_offset; /* private */ struct list_head family_list; /* private */ - struct list_head mcast_groups; /* private */ + struct module *module; }; /** * struct genl_info - receiving information * @snd_seq: sending sequence number - * @snd_pid: netlink pid of sender + * @snd_portid: netlink portid of sender * @nlhdr: netlink message header * @genlhdr: generic netlink message header * @userhdr: user specific header * @attrs: netlink attributes + * @_net: network namespace + * @user_ptr: user pointers + * @dst_sk: destination socket */ -struct genl_info -{ +struct genl_info { u32 snd_seq; - u32 snd_pid; + u32 snd_portid; struct nlmsghdr * nlhdr; struct genlmsghdr * genlhdr; void * userhdr; struct nlattr ** attrs; +#ifdef CONFIG_NET_NS + struct net * _net; +#endif + void * user_ptr[2]; + struct sock * dst_sk; }; +static inline struct net *genl_info_net(struct genl_info *info) +{ + return read_pnet(&info->_net); +} + +static inline void genl_info_net_set(struct genl_info *info, struct net *net) 
+{ + write_pnet(&info->_net, net); +} + /** * struct genl_ops - generic netlink operations * @cmd: command identifier + * @internal_flags: flags used by the family * @flags: flags * @policy: attribute validation policy * @doit: standard command callback @@ -74,58 +110,110 @@ struct genl_info * @done: completion callback for dumps * @ops_list: operations list */ -struct genl_ops -{ - u8 cmd; - unsigned int flags; +struct genl_ops { const struct nla_policy *policy; int (*doit)(struct sk_buff *skb, struct genl_info *info); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); - struct list_head ops_list; + u8 cmd; + u8 internal_flags; + u8 flags; }; -extern int genl_register_family(struct genl_family *family); -extern int genl_unregister_family(struct genl_family *family); -extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); -extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); -extern int genl_register_mc_group(struct genl_family *family, - struct genl_multicast_group *grp); -extern void genl_unregister_mc_group(struct genl_family *family, - struct genl_multicast_group *grp); +int __genl_register_family(struct genl_family *family); -extern struct sock *genl_sock; +static inline int genl_register_family(struct genl_family *family) +{ + family->module = THIS_MODULE; + return __genl_register_family(family); +} /** - * genlmsg_put - Add generic netlink header to netlink message - * @skb: socket buffer holding the message - * @pid: netlink pid the message is addressed to - * @seq: sequence number (usually the one of the sender) + * genl_register_family_with_ops - register a generic netlink family with ops * @family: generic netlink family - * @flags netlink message flags - * @cmd: generic netlink command + * @ops: operations to be registered + * @n_ops: number of elements to register * - * Returns pointer to user specific header + * Registers the specified family and operations from the specified table. + * Only one family may be registered with the same family name or identifier. + * + * The family id may equal GENL_ID_GENERATE causing an unique id to + * be automatically generated and assigned. + * + * Either a doit or dumpit callback must be specified for every registered + * operation or the function will fail. Only one operation structure per + * command identifier may be registered. + * + * See include/net/genetlink.h for more documenation on the operations + * structure. + * + * Return 0 on success or a negative error code. 
*/ -static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, - struct genl_family *family, int flags, u8 cmd) +static inline int +_genl_register_family_with_ops_grps(struct genl_family *family, + const struct genl_ops *ops, size_t n_ops, + const struct genl_multicast_group *mcgrps, + size_t n_mcgrps) { - struct nlmsghdr *nlh; - struct genlmsghdr *hdr; + family->module = THIS_MODULE; + family->ops = ops; + family->n_ops = n_ops; + family->mcgrps = mcgrps; + family->n_mcgrps = n_mcgrps; + return __genl_register_family(family); +} + +#define genl_register_family_with_ops(family, ops) \ + _genl_register_family_with_ops_grps((family), \ + (ops), ARRAY_SIZE(ops), \ + NULL, 0) +#define genl_register_family_with_ops_groups(family, ops, grps) \ + _genl_register_family_with_ops_grps((family), \ + (ops), ARRAY_SIZE(ops), \ + (grps), ARRAY_SIZE(grps)) - nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN + - family->hdrsize, flags); - if (nlh == NULL) - return NULL; +int genl_unregister_family(struct genl_family *family); +void genl_notify(struct genl_family *family, + struct sk_buff *skb, struct net *net, u32 portid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); - hdr = nlmsg_data(nlh); - hdr->cmd = cmd; - hdr->version = family->version; - hdr->reserved = 0; +struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info, + gfp_t flags); +void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, + struct genl_family *family, int flags, u8 cmd); + +/** + * genlmsg_nlhdr - Obtain netlink header from user specified header + * @user_hdr: user header as returned from genlmsg_put() + * @family: generic netlink family + * + * Returns pointer to netlink header. + */ +static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr, + struct genl_family *family) +{ + return (struct nlmsghdr *)((char *)user_hdr - + family->hdrsize - + GENL_HDRLEN - + NLMSG_HDRLEN); +} - return (char *) hdr + GENL_HDRLEN; +/** + * genl_dump_check_consistent - check if sequence is consistent and advertise if not + * @cb: netlink callback structure that stores the sequence number + * @user_hdr: user header as returned from genlmsg_put() + * @family: generic netlink family + * + * Cf. nl_dump_check_consistent(), this just provides a wrapper to make it + * simpler to use with generic netlink. 
+ */ +static inline void genl_dump_check_consistent(struct netlink_callback *cb, + void *user_hdr, + struct genl_family *family) +{ + nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr, family)); } /** @@ -143,7 +231,7 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb, struct genl_family *family, int flags, u8 cmd) { - return genlmsg_put(skb, info->snd_pid, info->snd_seq, family, + return genlmsg_put(skb, info->snd_portid, info->snd_seq, family, flags, cmd); } @@ -162,32 +250,69 @@ static inline int genlmsg_end(struct sk_buff *skb, void *hdr) * @skb: socket buffer the message is stored in * @hdr: generic netlink message header */ -static inline int genlmsg_cancel(struct sk_buff *skb, void *hdr) +static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr) +{ + if (hdr) + nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); +} + +/** + * genlmsg_multicast_netns - multicast a netlink message to a specific netns + * @family: the generic netlink family + * @net: the net namespace + * @skb: netlink message as socket buffer + * @portid: own netlink portid to avoid sending to yourself + * @group: offset of multicast group in groups array + * @flags: allocation flags + */ +static inline int genlmsg_multicast_netns(struct genl_family *family, + struct net *net, struct sk_buff *skb, + u32 portid, unsigned int group, gfp_t flags) { - return nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); + if (WARN_ON_ONCE(group >= family->n_mcgrps)) + return -EINVAL; + group = family->mcgrp_offset + group; + return nlmsg_multicast(net->genl_sock, skb, portid, group, flags); } /** - * genlmsg_multicast - multicast a netlink message + * genlmsg_multicast - multicast a netlink message to the default netns + * @family: the generic netlink family * @skb: netlink message as socket buffer - * @pid: own netlink pid to avoid sending to yourself - * @group: multicast group id + * @portid: own netlink portid to avoid sending to yourself + * @group: offset of multicast group in groups array * @flags: allocation flags */ -static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid, +static inline int genlmsg_multicast(struct genl_family *family, + struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { - return nlmsg_multicast(genl_sock, skb, pid, group, flags); + return genlmsg_multicast_netns(family, &init_net, skb, + portid, group, flags); } /** + * genlmsg_multicast_allns - multicast a netlink message to all net namespaces + * @family: the generic netlink family + * @skb: netlink message as socket buffer + * @portid: own netlink portid to avoid sending to yourself + * @group: offset of multicast group in groups array + * @flags: allocation flags + * + * This function must hold the RTNL or rcu_read_lock(). 
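 *
 * Editorial sketch, not part of this patch: a notification might be built and
 * multicast to all namespaces roughly as below, error handling omitted
 * (foo_family, FOO_CMD_NOTIFY, FOO_ATTR_ID and FOO_MCGRP_EVENTS are
 * placeholder names); note that @group is the offset into the family's
 * mcgrps[] array, not a global netlink group id:
 *
 *	struct sk_buff *skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	void *hdr = genlmsg_put(skb, 0, 0, &foo_family, 0, FOO_CMD_NOTIFY);
 *	nla_put_u32(skb, FOO_ATTR_ID, id);
 *	genlmsg_end(skb, hdr);
 *	genlmsg_multicast_allns(&foo_family, skb, 0, FOO_MCGRP_EVENTS,
 *				GFP_KERNEL);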
+ */ +int genlmsg_multicast_allns(struct genl_family *family, + struct sk_buff *skb, u32 portid, + unsigned int group, gfp_t flags); + +/** * genlmsg_unicast - unicast a netlink message * @skb: netlink message as socket buffer - * @pid: netlink pid of the destination socket + * @portid: netlink portid of the destination socket */ -static inline int genlmsg_unicast(struct sk_buff *skb, u32 pid) +static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid) { - return nlmsg_unicast(genl_sock, skb, pid); + return nlmsg_unicast(net->genl_sock, skb, portid); } /** @@ -197,12 +322,12 @@ static inline int genlmsg_unicast(struct sk_buff *skb, u32 pid) */ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info) { - return genlmsg_unicast(skb, info->snd_pid); + return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid); } /** * gennlmsg_data - head of message payload - * @gnlh: genetlink messsage header + * @gnlh: genetlink message header */ static inline void *genlmsg_data(const struct genlmsghdr *gnlh) { @@ -248,5 +373,25 @@ static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags) return nlmsg_new(genlmsg_total_size(payload), flags); } +/** + * genl_set_err - report error to genetlink broadcast listeners + * @family: the generic netlink family + * @net: the network namespace to report the error to + * @portid: the PORTID of a process that we want to skip (if any) + * @group: the broadcast group that will notice the error + * (this is the offset of the multicast group in the groups array) + * @code: error code, must be negative (as usual in kernelspace) + * + * This function returns the number of broadcast listeners that have set the + * NETLINK_RECV_NO_ENOBUFS socket option. + */ +static inline int genl_set_err(struct genl_family *family, struct net *net, + u32 portid, u32 group, int code) +{ + if (WARN_ON_ONCE(group >= family->n_mcgrps)) + return -EINVAL; + group = family->mcgrp_offset + group; + return netlink_set_err(net->genl_sock, portid, group, code); +} #endif /* __NET_GENERIC_NETLINK_H */ diff --git a/include/net/gre.h b/include/net/gre.h new file mode 100644 index 00000000000..b5318201874 --- /dev/null +++ b/include/net/gre.h @@ -0,0 +1,104 @@ +#ifndef __LINUX_GRE_H +#define __LINUX_GRE_H + +#include <linux/skbuff.h> +#include <net/ip_tunnels.h> + +#define GREPROTO_CISCO 0 +#define GREPROTO_PPTP 1 +#define GREPROTO_MAX 2 +#define GRE_IP_PROTO_MAX 2 + +struct gre_protocol { + int (*handler)(struct sk_buff *skb); + void (*err_handler)(struct sk_buff *skb, u32 info); +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; +#define GRE_HEADER_SECTION 4 + +int gre_add_protocol(const struct gre_protocol *proto, u8 version); +int gre_del_protocol(const struct gre_protocol *proto, u8 version); + +struct gre_cisco_protocol { + int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi); + int (*err_handler)(struct sk_buff *skb, u32 info, + const struct tnl_ptk_info *tpi); + u8 priority; +}; + +int gre_cisco_register(struct gre_cisco_protocol *proto); +int gre_cisco_unregister(struct gre_cisco_protocol *proto); + +void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + int hdr_len); + +static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb, + bool csum) +{ + return iptunnel_handle_offloads(skb, csum, + csum ? 
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); +} + + +static inline int ip_gre_calc_hlen(__be16 o_flags) +{ + int addend = 4; + + if (o_flags&TUNNEL_CSUM) + addend += 4; + if (o_flags&TUNNEL_KEY) + addend += 4; + if (o_flags&TUNNEL_SEQ) + addend += 4; + return addend; +} + +static inline __be16 gre_flags_to_tnl_flags(__be16 flags) +{ + __be16 tflags = 0; + + if (flags & GRE_CSUM) + tflags |= TUNNEL_CSUM; + if (flags & GRE_ROUTING) + tflags |= TUNNEL_ROUTING; + if (flags & GRE_KEY) + tflags |= TUNNEL_KEY; + if (flags & GRE_SEQ) + tflags |= TUNNEL_SEQ; + if (flags & GRE_STRICT) + tflags |= TUNNEL_STRICT; + if (flags & GRE_REC) + tflags |= TUNNEL_REC; + if (flags & GRE_VERSION) + tflags |= TUNNEL_VERSION; + + return tflags; +} + +static inline __be16 tnl_flags_to_gre_flags(__be16 tflags) +{ + __be16 flags = 0; + + if (tflags & TUNNEL_CSUM) + flags |= GRE_CSUM; + if (tflags & TUNNEL_ROUTING) + flags |= GRE_ROUTING; + if (tflags & TUNNEL_KEY) + flags |= GRE_KEY; + if (tflags & TUNNEL_SEQ) + flags |= GRE_SEQ; + if (tflags & TUNNEL_STRICT) + flags |= GRE_STRICT; + if (tflags & TUNNEL_REC) + flags |= GRE_REC; + if (tflags & TUNNEL_VERSION) + flags |= GRE_VERSION; + + return flags; +} + +#endif diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h new file mode 100644 index 00000000000..734d9b5f577 --- /dev/null +++ b/include/net/gro_cells.h @@ -0,0 +1,107 @@ +#ifndef _NET_GRO_CELLS_H +#define _NET_GRO_CELLS_H + +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/netdevice.h> + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +} ____cacheline_aligned_in_smp; + +struct gro_cells { + unsigned int gro_cells_mask; + struct gro_cell *cells; +}; + +static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) +{ + struct gro_cell *cell = gcells->cells; + struct net_device *dev = skb->dev; + + if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) { + netif_rx(skb); + return; + } + + if (skb_rx_queue_recorded(skb)) + cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask; + + if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return; + } + + /* We run in BH context */ + spin_lock(&cell->napi_skbs.lock); + + __skb_queue_tail(&cell->napi_skbs, skb); + if (skb_queue_len(&cell->napi_skbs) == 1) + napi_schedule(&cell->napi); + + spin_unlock(&cell->napi_skbs.lock); +} + +/* called unser BH context */ +static inline int gro_cell_poll(struct napi_struct *napi, int budget) +{ + struct gro_cell *cell = container_of(napi, struct gro_cell, napi); + struct sk_buff *skb; + int work_done = 0; + + spin_lock(&cell->napi_skbs.lock); + while (work_done < budget) { + skb = __skb_dequeue(&cell->napi_skbs); + if (!skb) + break; + spin_unlock(&cell->napi_skbs.lock); + napi_gro_receive(napi, skb); + work_done++; + spin_lock(&cell->napi_skbs.lock); + } + + if (work_done < budget) + napi_complete(napi); + spin_unlock(&cell->napi_skbs.lock); + return work_done; +} + +static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev) +{ + int i; + + gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1; + gcells->cells = kcalloc(gcells->gro_cells_mask + 1, + sizeof(struct gro_cell), + GFP_KERNEL); + if (!gcells->cells) + return -ENOMEM; + + for (i = 0; i <= gcells->gro_cells_mask; i++) { + struct gro_cell *cell = gcells->cells + i; + + skb_queue_head_init(&cell->napi_skbs); + netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); + 
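		/* Each cell owns a private NAPI context that gro_cell_poll()
		 * drains with a budget of 64 packets; it must be enabled
		 * before gro_cells_receive() may schedule it. */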
napi_enable(&cell->napi); + } + return 0; +} + +static inline void gro_cells_destroy(struct gro_cells *gcells) +{ + struct gro_cell *cell = gcells->cells; + int i; + + if (!cell) + return; + for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) { + netif_napi_del(&cell->napi); + skb_queue_purge(&cell->napi_skbs); + } + kfree(gcells->cells); + gcells->cells = NULL; +} + +#endif diff --git a/include/net/icmp.h b/include/net/icmp.h index 9f7ef3c8bae..970028e1338 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -25,51 +25,24 @@ struct icmp_err { int errno; - unsigned fatal:1; + unsigned int fatal:1; }; -extern struct icmp_err icmp_err_convert[]; -DECLARE_SNMP_STAT(struct icmp_mib, icmp_statistics); -DECLARE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics); -#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field) -#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field) -#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field) -#define ICMPMSGOUT_INC_STATS(field) SNMP_INC_STATS(icmpmsg_statistics, field+256) -#define ICMPMSGOUT_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmpmsg_statistics, field+256) -#define ICMPMSGOUT_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmpmsg_statistics, field+256) -#define ICMPMSGIN_INC_STATS(field) SNMP_INC_STATS(icmpmsg_statistics, field) -#define ICMPMSGIN_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmpmsg_statistics, field) -#define ICMPMSGIN_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmpmsg_statistics, field) +extern const struct icmp_err icmp_err_convert[]; +#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) +#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) +#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256) +#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) struct dst_entry; struct net_proto_family; struct sk_buff; +struct net; -extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); -extern int icmp_rcv(struct sk_buff *skb); -extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg); -extern void icmp_init(struct net_proto_family *ops); -extern void icmp_out_count(unsigned char type); - -/* Move into dst.h ? */ -extern int xrlim_allow(struct dst_entry *dst, int timeout); - -struct raw_sock { - /* inet_sock has to be the first member */ - struct inet_sock inet; - struct icmp_filter filter; -}; - -static inline struct raw_sock *raw_sk(const struct sock *sk) -{ - return (struct raw_sock *)sk; -} - -extern int sysctl_icmp_echo_ignore_all; -extern int sysctl_icmp_echo_ignore_broadcasts; -extern int sysctl_icmp_ignore_bogus_error_responses; -extern int sysctl_icmp_errors_use_inbound_ifaddr; -extern int sysctl_icmp_ratelimit; -extern int sysctl_icmp_ratemask; +void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); +int icmp_rcv(struct sk_buff *skb); +void icmp_err(struct sk_buff *skb, u32 info); +int icmp_init(void); +void icmp_out_count(struct net *net, unsigned char type); #endif /* _ICMP_H */ diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h deleted file mode 100644 index 285b2adfa64..00000000000 --- a/include/net/ieee80211.h +++ /dev/null @@ -1,1335 +0,0 @@ -/* - * Merged with mainline ieee80211.h in Aug 2004. 
Original ieee802_11 - * remains copyright by the original authors - * - * Portions of the merged code are based on Host AP (software wireless - * LAN access point) driver for Intersil Prism2/2.5/3. - * - * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen - * <j@w1.fi> - * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi> - * - * Adaption to a generic IEEE 802.11 stack by James Ketrenos - * <jketreno@linux.intel.com> - * Copyright (c) 2004-2005, Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. See README and COPYING for - * more details. - * - * API Version History - * 1.0.x -- Initial version - * 1.1.x -- Added radiotap, QoS, TIM, ieee80211_geo APIs, - * various structure changes, and crypto API init method - */ -#ifndef IEEE80211_H -#define IEEE80211_H -#include <linux/if_ether.h> /* ETH_ALEN */ -#include <linux/kernel.h> /* ARRAY_SIZE */ -#include <linux/wireless.h> - -#define IEEE80211_VERSION "git-1.1.13" - -#define IEEE80211_DATA_LEN 2304 -/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section - 6.2.1.1.2. - - The figure in section 7.1.2 suggests a body size of up to 2312 - bytes is allowed, which is a bit confusing, I suspect this - represents the 2304 bytes of real data, plus a possible 8 bytes of - WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */ - -#define IEEE80211_1ADDR_LEN 10 -#define IEEE80211_2ADDR_LEN 16 -#define IEEE80211_3ADDR_LEN 24 -#define IEEE80211_4ADDR_LEN 30 -#define IEEE80211_FCS_LEN 4 -#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -#define MIN_FRAG_THRESHOLD 256U -#define MAX_FRAG_THRESHOLD 2346U - -/* Frame control field constants */ -#define IEEE80211_FCTL_VERS 0x0003 -#define IEEE80211_FCTL_FTYPE 0x000c -#define IEEE80211_FCTL_STYPE 0x00f0 -#define IEEE80211_FCTL_TODS 0x0100 -#define IEEE80211_FCTL_FROMDS 0x0200 -#define IEEE80211_FCTL_MOREFRAGS 0x0400 -#define IEEE80211_FCTL_RETRY 0x0800 -#define IEEE80211_FCTL_PM 0x1000 -#define IEEE80211_FCTL_MOREDATA 0x2000 -#define IEEE80211_FCTL_PROTECTED 0x4000 -#define IEEE80211_FCTL_ORDER 0x8000 - -#define IEEE80211_FTYPE_MGMT 0x0000 -#define IEEE80211_FTYPE_CTL 0x0004 -#define IEEE80211_FTYPE_DATA 0x0008 - -/* management */ -#define IEEE80211_STYPE_ASSOC_REQ 0x0000 -#define IEEE80211_STYPE_ASSOC_RESP 0x0010 -#define IEEE80211_STYPE_REASSOC_REQ 0x0020 -#define IEEE80211_STYPE_REASSOC_RESP 0x0030 -#define IEEE80211_STYPE_PROBE_REQ 0x0040 -#define IEEE80211_STYPE_PROBE_RESP 0x0050 -#define IEEE80211_STYPE_BEACON 0x0080 -#define IEEE80211_STYPE_ATIM 0x0090 -#define IEEE80211_STYPE_DISASSOC 0x00A0 -#define IEEE80211_STYPE_AUTH 0x00B0 -#define IEEE80211_STYPE_DEAUTH 0x00C0 -#define IEEE80211_STYPE_ACTION 0x00D0 - -/* control */ -#define IEEE80211_STYPE_PSPOLL 0x00A0 -#define IEEE80211_STYPE_RTS 0x00B0 -#define IEEE80211_STYPE_CTS 0x00C0 -#define IEEE80211_STYPE_ACK 0x00D0 -#define IEEE80211_STYPE_CFEND 0x00E0 -#define IEEE80211_STYPE_CFENDACK 0x00F0 - -/* data */ -#define IEEE80211_STYPE_DATA 0x0000 -#define IEEE80211_STYPE_DATA_CFACK 0x0010 -#define IEEE80211_STYPE_DATA_CFPOLL 0x0020 -#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030 -#define IEEE80211_STYPE_NULLFUNC 0x0040 -#define IEEE80211_STYPE_CFACK 0x0050 -#define IEEE80211_STYPE_CFPOLL 0x0060 -#define IEEE80211_STYPE_CFACKPOLL 0x0070 -#define IEEE80211_STYPE_QOS_DATA 0x0080 - 
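/* Editorial aside, not part of the removed header: the frame-control masks
 * above were typically combined as in this sketch to classify a frame from
 * its little-endian frame_ctl field:
 *
 *	u16 fc = le16_to_cpu(hdr->frame_ctl);
 *	bool is_qos_data = (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
 *			   (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA;
 */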
-#define IEEE80211_SCTL_FRAG 0x000F -#define IEEE80211_SCTL_SEQ 0xFFF0 - -/* QOS control */ -#define IEEE80211_QCTL_TID 0x000F - -/* debug macros */ - -#ifdef CONFIG_IEEE80211_DEBUG -extern u32 ieee80211_debug_level; -#define IEEE80211_DEBUG(level, fmt, args...) \ -do { if (ieee80211_debug_level & (level)) \ - printk(KERN_DEBUG "ieee80211: %c %s " fmt, \ - in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) -static inline bool ieee80211_ratelimit_debug(u32 level) -{ - return (ieee80211_debug_level & level) && net_ratelimit(); -} -#else -#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0) -static inline bool ieee80211_ratelimit_debug(u32 level) -{ - return false; -} -#endif /* CONFIG_IEEE80211_DEBUG */ - -/* escape_essid() is intended to be used in debug (and possibly error) - * messages. It should never be used for passing essid to user space. */ -const char *escape_essid(const char *essid, u8 essid_len); - -/* - * To use the debug system: - * - * If you are defining a new debug classification, simply add it to the #define - * list here in the form of: - * - * #define IEEE80211_DL_xxxx VALUE - * - * shifting value to the left one bit from the previous entry. xxxx should be - * the name of the classification (for example, WEP) - * - * You then need to either add a IEEE80211_xxxx_DEBUG() macro definition for your - * classification, or use IEEE80211_DEBUG(IEEE80211_DL_xxxx, ...) whenever you want - * to send output to that classification. - * - * To add your debug level to the list of levels seen when you perform - * - * % cat /proc/net/ieee80211/debug_level - * - * you simply need to add your entry to the ieee80211_debug_level array. - * - * If you do not see debug_level in /proc/net/ieee80211 then you do not have - * CONFIG_IEEE80211_DEBUG defined in your kernel configuration - * - */ - -#define IEEE80211_DL_INFO (1<<0) -#define IEEE80211_DL_WX (1<<1) -#define IEEE80211_DL_SCAN (1<<2) -#define IEEE80211_DL_STATE (1<<3) -#define IEEE80211_DL_MGMT (1<<4) -#define IEEE80211_DL_FRAG (1<<5) -#define IEEE80211_DL_DROP (1<<7) - -#define IEEE80211_DL_TX (1<<8) -#define IEEE80211_DL_RX (1<<9) -#define IEEE80211_DL_QOS (1<<31) - -#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a) -#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a) -#define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a) - -#define IEEE80211_DEBUG_WX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_WX, f, ## a) -#define IEEE80211_DEBUG_SCAN(f, a...) IEEE80211_DEBUG(IEEE80211_DL_SCAN, f, ## a) -#define IEEE80211_DEBUG_STATE(f, a...) IEEE80211_DEBUG(IEEE80211_DL_STATE, f, ## a) -#define IEEE80211_DEBUG_MGMT(f, a...) IEEE80211_DEBUG(IEEE80211_DL_MGMT, f, ## a) -#define IEEE80211_DEBUG_FRAG(f, a...) IEEE80211_DEBUG(IEEE80211_DL_FRAG, f, ## a) -#define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a) -#define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a) -#define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a) -#define IEEE80211_DEBUG_QOS(f, a...) 
IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a) -#include <linux/netdevice.h> -#include <linux/wireless.h> -#include <linux/if_arp.h> /* ARPHRD_ETHER */ - -#ifndef WIRELESS_SPY -#define WIRELESS_SPY /* enable iwspy support */ -#endif -#include <net/iw_handler.h> /* new driver API */ - -#ifndef ETH_P_PAE -#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ -#endif /* ETH_P_PAE */ - -#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */ - -#ifndef ETH_P_80211_RAW -#define ETH_P_80211_RAW (ETH_P_ECONET + 1) -#endif - -/* IEEE 802.11 defines */ - -#define P80211_OUI_LEN 3 - -struct ieee80211_snap_hdr { - - u8 dsap; /* always 0xAA */ - u8 ssap; /* always 0xAA */ - u8 ctrl; /* always 0x03 */ - u8 oui[P80211_OUI_LEN]; /* organizational universal id */ - -} __attribute__ ((packed)); - -#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr) - -#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS) -#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE) -#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) - -#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG) -#define WLAN_GET_SEQ_SEQ(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) - -/* Authentication algorithms */ -#define WLAN_AUTH_OPEN 0 -#define WLAN_AUTH_SHARED_KEY 1 -#define WLAN_AUTH_LEAP 2 - -#define WLAN_AUTH_CHALLENGE_LEN 128 - -#define WLAN_CAPABILITY_ESS (1<<0) -#define WLAN_CAPABILITY_IBSS (1<<1) -#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) -#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) -#define WLAN_CAPABILITY_PRIVACY (1<<4) -#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) -#define WLAN_CAPABILITY_PBCC (1<<6) -#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) -#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) -#define WLAN_CAPABILITY_QOS (1<<9) -#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) -#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) - -/* 802.11g ERP information element */ -#define WLAN_ERP_NON_ERP_PRESENT (1<<0) -#define WLAN_ERP_USE_PROTECTION (1<<1) -#define WLAN_ERP_BARKER_PREAMBLE (1<<2) - -/* Status codes */ -enum ieee80211_statuscode { - WLAN_STATUS_SUCCESS = 0, - WLAN_STATUS_UNSPECIFIED_FAILURE = 1, - WLAN_STATUS_CAPS_UNSUPPORTED = 10, - WLAN_STATUS_REASSOC_NO_ASSOC = 11, - WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, - WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, - WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, - WLAN_STATUS_CHALLENGE_FAIL = 15, - WLAN_STATUS_AUTH_TIMEOUT = 16, - WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, - WLAN_STATUS_ASSOC_DENIED_RATES = 18, - /* 802.11b */ - WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, - WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, - WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, - /* 802.11h */ - WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, - WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, - WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, - /* 802.11g */ - WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, - WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, - /* 802.11i */ - WLAN_STATUS_INVALID_IE = 40, - WLAN_STATUS_INVALID_GROUP_CIPHER = 41, - WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, - WLAN_STATUS_INVALID_AKMP = 43, - WLAN_STATUS_UNSUPP_RSN_VERSION = 44, - WLAN_STATUS_INVALID_RSN_IE_CAP = 45, - WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, -}; - -/* Reason codes */ -enum ieee80211_reasoncode { - WLAN_REASON_UNSPECIFIED = 1, - WLAN_REASON_PREV_AUTH_NOT_VALID = 2, - WLAN_REASON_DEAUTH_LEAVING = 3, - WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, - WLAN_REASON_DISASSOC_AP_BUSY = 5, - WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, - WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, - WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, 
- WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, - /* 802.11h */ - WLAN_REASON_DISASSOC_BAD_POWER = 10, - WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, - /* 802.11i */ - WLAN_REASON_INVALID_IE = 13, - WLAN_REASON_MIC_FAILURE = 14, - WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, - WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, - WLAN_REASON_IE_DIFFERENT = 17, - WLAN_REASON_INVALID_GROUP_CIPHER = 18, - WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, - WLAN_REASON_INVALID_AKMP = 20, - WLAN_REASON_UNSUPP_RSN_VERSION = 21, - WLAN_REASON_INVALID_RSN_IE_CAP = 22, - WLAN_REASON_IEEE8021X_FAILED = 23, - WLAN_REASON_CIPHER_SUITE_REJECTED = 24, -}; - -/* Action categories - 802.11h */ -enum ieee80211_actioncategories { - WLAN_ACTION_SPECTRUM_MGMT = 0, - /* Reserved 1-127 */ - /* Error 128-255 */ -}; - -/* Action details - 802.11h */ -enum ieee80211_actiondetails { - WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0, - WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1, - WLAN_ACTION_CATEGORY_TPC_REQUEST = 2, - WLAN_ACTION_CATEGORY_TPC_REPORT = 3, - WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4, - /* 5 - 255 Reserved */ -}; - -#define IEEE80211_STATMASK_SIGNAL (1<<0) -#define IEEE80211_STATMASK_RSSI (1<<1) -#define IEEE80211_STATMASK_NOISE (1<<2) -#define IEEE80211_STATMASK_RATE (1<<3) -#define IEEE80211_STATMASK_WEMASK 0x7 - -#define IEEE80211_CCK_MODULATION (1<<0) -#define IEEE80211_OFDM_MODULATION (1<<1) - -#define IEEE80211_24GHZ_BAND (1<<0) -#define IEEE80211_52GHZ_BAND (1<<1) - -#define IEEE80211_CCK_RATE_1MB 0x02 -#define IEEE80211_CCK_RATE_2MB 0x04 -#define IEEE80211_CCK_RATE_5MB 0x0B -#define IEEE80211_CCK_RATE_11MB 0x16 -#define IEEE80211_OFDM_RATE_6MB 0x0C -#define IEEE80211_OFDM_RATE_9MB 0x12 -#define IEEE80211_OFDM_RATE_12MB 0x18 -#define IEEE80211_OFDM_RATE_18MB 0x24 -#define IEEE80211_OFDM_RATE_24MB 0x30 -#define IEEE80211_OFDM_RATE_36MB 0x48 -#define IEEE80211_OFDM_RATE_48MB 0x60 -#define IEEE80211_OFDM_RATE_54MB 0x6C -#define IEEE80211_BASIC_RATE_MASK 0x80 - -#define IEEE80211_CCK_RATE_1MB_MASK (1<<0) -#define IEEE80211_CCK_RATE_2MB_MASK (1<<1) -#define IEEE80211_CCK_RATE_5MB_MASK (1<<2) -#define IEEE80211_CCK_RATE_11MB_MASK (1<<3) -#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4) -#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5) -#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6) -#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7) -#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8) -#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9) -#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10) -#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11) - -#define IEEE80211_CCK_RATES_MASK 0x0000000F -#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \ - IEEE80211_CCK_RATE_2MB_MASK) -#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \ - IEEE80211_CCK_RATE_5MB_MASK | \ - IEEE80211_CCK_RATE_11MB_MASK) - -#define IEEE80211_OFDM_RATES_MASK 0x00000FF0 -#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \ - IEEE80211_OFDM_RATE_12MB_MASK | \ - IEEE80211_OFDM_RATE_24MB_MASK) -#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \ - IEEE80211_OFDM_RATE_9MB_MASK | \ - IEEE80211_OFDM_RATE_18MB_MASK | \ - IEEE80211_OFDM_RATE_36MB_MASK | \ - IEEE80211_OFDM_RATE_48MB_MASK | \ - IEEE80211_OFDM_RATE_54MB_MASK) -#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \ - IEEE80211_CCK_DEFAULT_RATES_MASK) - -#define IEEE80211_NUM_OFDM_RATES 8 -#define IEEE80211_NUM_CCK_RATES 4 -#define IEEE80211_OFDM_SHIFT_MASK_A 4 - -/* NOTE: This data is for statistical purposes; not all hardware provides this - * 
information for frames received. - * For ieee80211_rx_mgt, you need to set at least the 'len' parameter. - */ -struct ieee80211_rx_stats { - u32 mac_time; - s8 rssi; - u8 signal; - u8 noise; - u16 rate; /* in 100 kbps */ - u8 received_channel; - u8 control; - u8 mask; - u8 freq; - u16 len; - u64 tsf; - u32 beacon_time; -}; - -/* IEEE 802.11 requires that STA supports concurrent reception of at least - * three fragmented frames. This define can be increased to support more - * concurrent frames, but it should be noted that each entry can consume about - * 2 kB of RAM and increasing cache size will slow down frame reassembly. */ -#define IEEE80211_FRAG_CACHE_LEN 4 - -struct ieee80211_frag_entry { - unsigned long first_frag_time; - unsigned int seq; - unsigned int last_frag; - struct sk_buff *skb; - u8 src_addr[ETH_ALEN]; - u8 dst_addr[ETH_ALEN]; -}; - -struct ieee80211_stats { - unsigned int tx_unicast_frames; - unsigned int tx_multicast_frames; - unsigned int tx_fragments; - unsigned int tx_unicast_octets; - unsigned int tx_multicast_octets; - unsigned int tx_deferred_transmissions; - unsigned int tx_single_retry_frames; - unsigned int tx_multiple_retry_frames; - unsigned int tx_retry_limit_exceeded; - unsigned int tx_discards; - unsigned int rx_unicast_frames; - unsigned int rx_multicast_frames; - unsigned int rx_fragments; - unsigned int rx_unicast_octets; - unsigned int rx_multicast_octets; - unsigned int rx_fcs_errors; - unsigned int rx_discards_no_buffer; - unsigned int tx_discards_wrong_sa; - unsigned int rx_discards_undecryptable; - unsigned int rx_message_in_msg_fragments; - unsigned int rx_message_in_bad_msg_fragments; -}; - -struct ieee80211_device; - -#include "ieee80211_crypt.h" - -#define SEC_KEY_1 (1<<0) -#define SEC_KEY_2 (1<<1) -#define SEC_KEY_3 (1<<2) -#define SEC_KEY_4 (1<<3) -#define SEC_ACTIVE_KEY (1<<4) -#define SEC_AUTH_MODE (1<<5) -#define SEC_UNICAST_GROUP (1<<6) -#define SEC_LEVEL (1<<7) -#define SEC_ENABLED (1<<8) -#define SEC_ENCRYPT (1<<9) - -#define SEC_LEVEL_0 0 /* None */ -#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */ -#define SEC_LEVEL_2 2 /* Level 1 + TKIP */ -#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */ -#define SEC_LEVEL_3 4 /* Level 2 + CCMP */ - -#define SEC_ALG_NONE 0 -#define SEC_ALG_WEP 1 -#define SEC_ALG_TKIP 2 -#define SEC_ALG_CCMP 3 - -#define WEP_KEYS 4 -#define WEP_KEY_LEN 13 -#define SCM_KEY_LEN 32 -#define SCM_TEMPORAL_KEY_LENGTH 16 - -struct ieee80211_security { - u16 active_key:2, - enabled:1, - auth_mode:2, auth_algo:4, unicast_uses_group:1, encrypt:1; - u8 encode_alg[WEP_KEYS]; - u8 key_sizes[WEP_KEYS]; - u8 keys[WEP_KEYS][SCM_KEY_LEN]; - u8 level; - u16 flags; -} __attribute__ ((packed)); - -/* - - 802.11 data frame from AP - - ,-------------------------------------------------------------------. -Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | - |------|------|---------|---------|---------|------|---------|------| -Desc. 
| ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs | - | | tion | (BSSID) | | | ence | data | | - `-------------------------------------------------------------------' - -Total: 28-2340 bytes - -*/ - -#define BEACON_PROBE_SSID_ID_POSITION 12 - -/* Management Frame Information Element Types */ -enum ieee80211_mfie { - MFIE_TYPE_SSID = 0, - MFIE_TYPE_RATES = 1, - MFIE_TYPE_FH_SET = 2, - MFIE_TYPE_DS_SET = 3, - MFIE_TYPE_CF_SET = 4, - MFIE_TYPE_TIM = 5, - MFIE_TYPE_IBSS_SET = 6, - MFIE_TYPE_COUNTRY = 7, - MFIE_TYPE_HOP_PARAMS = 8, - MFIE_TYPE_HOP_TABLE = 9, - MFIE_TYPE_REQUEST = 10, - MFIE_TYPE_CHALLENGE = 16, - MFIE_TYPE_POWER_CONSTRAINT = 32, - MFIE_TYPE_POWER_CAPABILITY = 33, - MFIE_TYPE_TPC_REQUEST = 34, - MFIE_TYPE_TPC_REPORT = 35, - MFIE_TYPE_SUPP_CHANNELS = 36, - MFIE_TYPE_CSA = 37, - MFIE_TYPE_MEASURE_REQUEST = 38, - MFIE_TYPE_MEASURE_REPORT = 39, - MFIE_TYPE_QUIET = 40, - MFIE_TYPE_IBSS_DFS = 41, - MFIE_TYPE_ERP_INFO = 42, - MFIE_TYPE_RSN = 48, - MFIE_TYPE_RATES_EX = 50, - MFIE_TYPE_GENERIC = 221, - MFIE_TYPE_QOS_PARAMETER = 222, -}; - -/* Minimal header; can be used for passing 802.11 frames with sufficient - * information to determine what type of underlying data type is actually - * stored in the data. */ -struct ieee80211_hdr { - __le16 frame_ctl; - __le16 duration_id; - u8 payload[0]; -} __attribute__ ((packed)); - -struct ieee80211_hdr_1addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 payload[0]; -} __attribute__ ((packed)); - -struct ieee80211_hdr_2addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 payload[0]; -} __attribute__ ((packed)); - -struct ieee80211_hdr_3addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; - u8 payload[0]; -} __attribute__ ((packed)); - -struct ieee80211_hdr_4addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; - u8 addr4[ETH_ALEN]; - u8 payload[0]; -} __attribute__ ((packed)); - -struct ieee80211_hdr_3addrqos { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; - u8 payload[0]; - __le16 qos_ctl; -} __attribute__ ((packed)); - -struct ieee80211_hdr_4addrqos { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; - u8 addr4[ETH_ALEN]; - u8 payload[0]; - __le16 qos_ctl; -} __attribute__ ((packed)); - -struct ieee80211_info_element { - u8 id; - u8 len; - u8 data[0]; -} __attribute__ ((packed)); - -/* - * These are the data types that can make up management packets - * - u16 auth_algorithm; - u16 auth_sequence; - u16 beacon_interval; - u16 capability; - u8 current_ap[ETH_ALEN]; - u16 listen_interval; - struct { - u16 association_id:14, reserved:2; - } __attribute__ ((packed)); - u32 time_stamp[2]; - u16 reason; - u16 status; -*/ - -struct ieee80211_auth { - struct ieee80211_hdr_3addr header; - __le16 algorithm; - __le16 transaction; - __le16 status; - /* challenge */ - struct ieee80211_info_element info_element[0]; -} __attribute__ ((packed)); - -struct ieee80211_channel_switch { - u8 id; - u8 len; - u8 mode; - u8 channel; - u8 count; -} __attribute__ ((packed)); - -struct ieee80211_action { - struct ieee80211_hdr_3addr header; - u8 category; - u8 action; - union { - struct ieee80211_action_exchange { - u8 token; - struct ieee80211_info_element info_element[0]; - } exchange; - 
struct ieee80211_channel_switch channel_switch; - - } format; -} __attribute__ ((packed)); - -struct ieee80211_disassoc { - struct ieee80211_hdr_3addr header; - __le16 reason; -} __attribute__ ((packed)); - -/* Alias deauth for disassoc */ -#define ieee80211_deauth ieee80211_disassoc - -struct ieee80211_probe_request { - struct ieee80211_hdr_3addr header; - /* SSID, supported rates */ - struct ieee80211_info_element info_element[0]; -} __attribute__ ((packed)); - -struct ieee80211_probe_response { - struct ieee80211_hdr_3addr header; - __le32 time_stamp[2]; - __le16 beacon_interval; - __le16 capability; - /* SSID, supported rates, FH params, DS params, - * CF params, IBSS params, TIM (if beacon), RSN */ - struct ieee80211_info_element info_element[0]; -} __attribute__ ((packed)); - -/* Alias beacon for probe_response */ -#define ieee80211_beacon ieee80211_probe_response - -struct ieee80211_assoc_request { - struct ieee80211_hdr_3addr header; - __le16 capability; - __le16 listen_interval; - /* SSID, supported rates, RSN */ - struct ieee80211_info_element info_element[0]; -} __attribute__ ((packed)); - -struct ieee80211_reassoc_request { - struct ieee80211_hdr_3addr header; - __le16 capability; - __le16 listen_interval; - u8 current_ap[ETH_ALEN]; - struct ieee80211_info_element info_element[0]; -} __attribute__ ((packed)); - -struct ieee80211_assoc_response { - struct ieee80211_hdr_3addr header; - __le16 capability; - __le16 status; - __le16 aid; - /* supported rates */ - struct ieee80211_info_element info_element[0]; -} __attribute__ ((packed)); - -struct ieee80211_txb { - u8 nr_frags; - u8 encrypted; - u8 rts_included; - u8 reserved; - u16 frag_size; - u16 payload_size; - struct sk_buff *fragments[0]; -}; - -/* SWEEP TABLE ENTRIES NUMBER */ -#define MAX_SWEEP_TAB_ENTRIES 42 -#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7 -/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs - * only use 8, and then use extended rates for the remaining supported - * rates. Other APs, however, stick all of their supported rates on the - * main rates information element... 
*/ -#define MAX_RATES_LENGTH ((u8)12) -#define MAX_RATES_EX_LENGTH ((u8)16) -#define MAX_NETWORK_COUNT 128 - -#define CRC_LENGTH 4U - -#define MAX_WPA_IE_LEN 64 - -#define NETWORK_EMPTY_ESSID (1<<0) -#define NETWORK_HAS_OFDM (1<<1) -#define NETWORK_HAS_CCK (1<<2) - -/* QoS structure */ -#define NETWORK_HAS_QOS_PARAMETERS (1<<3) -#define NETWORK_HAS_QOS_INFORMATION (1<<4) -#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ - NETWORK_HAS_QOS_INFORMATION) - -/* 802.11h */ -#define NETWORK_HAS_POWER_CONSTRAINT (1<<5) -#define NETWORK_HAS_CSA (1<<6) -#define NETWORK_HAS_QUIET (1<<7) -#define NETWORK_HAS_IBSS_DFS (1<<8) -#define NETWORK_HAS_TPC_REPORT (1<<9) - -#define NETWORK_HAS_ERP_VALUE (1<<10) - -#define QOS_QUEUE_NUM 4 -#define QOS_OUI_LEN 3 -#define QOS_OUI_TYPE 2 -#define QOS_ELEMENT_ID 221 -#define QOS_OUI_INFO_SUB_TYPE 0 -#define QOS_OUI_PARAM_SUB_TYPE 1 -#define QOS_VERSION_1 1 -#define QOS_AIFSN_MIN_VALUE 2 - -struct ieee80211_qos_information_element { - u8 elementID; - u8 length; - u8 qui[QOS_OUI_LEN]; - u8 qui_type; - u8 qui_subtype; - u8 version; - u8 ac_info; -} __attribute__ ((packed)); - -struct ieee80211_qos_ac_parameter { - u8 aci_aifsn; - u8 ecw_min_max; - __le16 tx_op_limit; -} __attribute__ ((packed)); - -struct ieee80211_qos_parameter_info { - struct ieee80211_qos_information_element info_element; - u8 reserved; - struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; -} __attribute__ ((packed)); - -struct ieee80211_qos_parameters { - __le16 cw_min[QOS_QUEUE_NUM]; - __le16 cw_max[QOS_QUEUE_NUM]; - u8 aifs[QOS_QUEUE_NUM]; - u8 flag[QOS_QUEUE_NUM]; - __le16 tx_op_limit[QOS_QUEUE_NUM]; -} __attribute__ ((packed)); - -struct ieee80211_qos_data { - struct ieee80211_qos_parameters parameters; - int active; - int supported; - u8 param_count; - u8 old_param_count; -}; - -struct ieee80211_tim_parameters { - u8 tim_count; - u8 tim_period; -} __attribute__ ((packed)); - -/*******************************************************/ - -enum { /* ieee80211_basic_report.map */ - IEEE80211_BASIC_MAP_BSS = (1 << 0), - IEEE80211_BASIC_MAP_OFDM = (1 << 1), - IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), - IEEE80211_BASIC_MAP_RADAR = (1 << 3), - IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), - /* Bits 5-7 are reserved */ - -}; -struct ieee80211_basic_report { - u8 channel; - __le64 start_time; - __le16 duration; - u8 map; -} __attribute__ ((packed)); - -enum { /* ieee80211_measurement_request.mode */ - /* Bit 0 is reserved */ - IEEE80211_MEASUREMENT_ENABLE = (1 << 1), - IEEE80211_MEASUREMENT_REQUEST = (1 << 2), - IEEE80211_MEASUREMENT_REPORT = (1 << 3), - /* Bits 4-7 are reserved */ -}; - -enum { - IEEE80211_REPORT_BASIC = 0, /* required */ - IEEE80211_REPORT_CCA = 1, /* optional */ - IEEE80211_REPORT_RPI = 2, /* optional */ - /* 3-255 reserved */ -}; - -struct ieee80211_measurement_params { - u8 channel; - __le64 start_time; - __le16 duration; -} __attribute__ ((packed)); - -struct ieee80211_measurement_request { - struct ieee80211_info_element ie; - u8 token; - u8 mode; - u8 type; - struct ieee80211_measurement_params params[0]; -} __attribute__ ((packed)); - -struct ieee80211_measurement_report { - struct ieee80211_info_element ie; - u8 token; - u8 mode; - u8 type; - union { - struct ieee80211_basic_report basic[0]; - } u; -} __attribute__ ((packed)); - -struct ieee80211_tpc_report { - u8 transmit_power; - u8 link_margin; -} __attribute__ ((packed)); - -struct ieee80211_channel_map { - u8 channel; - u8 map; -} __attribute__ ((packed)); - -struct ieee80211_ibss_dfs { - struct 
ieee80211_info_element ie; - u8 owner[ETH_ALEN]; - u8 recovery_interval; - struct ieee80211_channel_map channel_map[0]; -}; - -struct ieee80211_csa { - u8 mode; - u8 channel; - u8 count; -} __attribute__ ((packed)); - -struct ieee80211_quiet { - u8 count; - u8 period; - u8 duration; - u8 offset; -} __attribute__ ((packed)); - -struct ieee80211_network { - /* These entries are used to identify a unique network */ - u8 bssid[ETH_ALEN]; - u8 channel; - /* Ensure null-terminated for any debug msgs */ - u8 ssid[IW_ESSID_MAX_SIZE + 1]; - u8 ssid_len; - - struct ieee80211_qos_data qos_data; - - /* These are network statistics */ - struct ieee80211_rx_stats stats; - u16 capability; - u8 rates[MAX_RATES_LENGTH]; - u8 rates_len; - u8 rates_ex[MAX_RATES_EX_LENGTH]; - u8 rates_ex_len; - unsigned long last_scanned; - u8 mode; - u32 flags; - u32 last_associate; - u32 time_stamp[2]; - u16 beacon_interval; - u16 listen_interval; - u16 atim_window; - u8 erp_value; - u8 wpa_ie[MAX_WPA_IE_LEN]; - size_t wpa_ie_len; - u8 rsn_ie[MAX_WPA_IE_LEN]; - size_t rsn_ie_len; - struct ieee80211_tim_parameters tim; - - /* 802.11h info */ - - /* Power Constraint - mandatory if spctrm mgmt required */ - u8 power_constraint; - - /* TPC Report - mandatory if spctrm mgmt required */ - struct ieee80211_tpc_report tpc_report; - - /* IBSS DFS - mandatory if spctrm mgmt required and IBSS - * NOTE: This is variable length and so must be allocated dynamically */ - struct ieee80211_ibss_dfs *ibss_dfs; - - /* Channel Switch Announcement - optional if spctrm mgmt required */ - struct ieee80211_csa csa; - - /* Quiet - optional if spctrm mgmt required */ - struct ieee80211_quiet quiet; - - struct list_head list; -}; - -enum ieee80211_state { - IEEE80211_UNINITIALIZED = 0, - IEEE80211_INITIALIZED, - IEEE80211_ASSOCIATING, - IEEE80211_ASSOCIATED, - IEEE80211_AUTHENTICATING, - IEEE80211_AUTHENTICATED, - IEEE80211_SHUTDOWN -}; - -#define DEFAULT_MAX_SCAN_AGE (15 * HZ) -#define DEFAULT_FTS 2346 - -#define CFG_IEEE80211_RESERVE_FCS (1<<0) -#define CFG_IEEE80211_COMPUTE_FCS (1<<1) -#define CFG_IEEE80211_RTS (1<<2) - -#define IEEE80211_24GHZ_MIN_CHANNEL 1 -#define IEEE80211_24GHZ_MAX_CHANNEL 14 -#define IEEE80211_24GHZ_CHANNELS (IEEE80211_24GHZ_MAX_CHANNEL - \ - IEEE80211_24GHZ_MIN_CHANNEL + 1) - -#define IEEE80211_52GHZ_MIN_CHANNEL 34 -#define IEEE80211_52GHZ_MAX_CHANNEL 165 -#define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \ - IEEE80211_52GHZ_MIN_CHANNEL + 1) - -enum { - IEEE80211_CH_PASSIVE_ONLY = (1 << 0), - IEEE80211_CH_80211H_RULES = (1 << 1), - IEEE80211_CH_B_ONLY = (1 << 2), - IEEE80211_CH_NO_IBSS = (1 << 3), - IEEE80211_CH_UNIFORM_SPREADING = (1 << 4), - IEEE80211_CH_RADAR_DETECT = (1 << 5), - IEEE80211_CH_INVALID = (1 << 6), -}; - -struct ieee80211_channel { - u32 freq; /* in MHz */ - u8 channel; - u8 flags; - u8 max_power; /* in dBm */ -}; - -struct ieee80211_geo { - u8 name[4]; - u8 bg_channels; - u8 a_channels; - struct ieee80211_channel bg[IEEE80211_24GHZ_CHANNELS]; - struct ieee80211_channel a[IEEE80211_52GHZ_CHANNELS]; -}; - -struct ieee80211_device { - struct net_device *dev; - struct ieee80211_security sec; - - /* Bookkeeping structures */ - struct net_device_stats stats; - struct ieee80211_stats ieee_stats; - - struct ieee80211_geo geo; - - /* Probe / Beacon management */ - struct list_head network_free_list; - struct list_head network_list; - struct ieee80211_network *networks; - int scans; - int scan_age; - - int iw_mode; /* operating mode (IW_MODE_*) */ - struct iw_spy_data spy_data; /* iwspy support */ - - 
spinlock_t lock; - - int tx_headroom; /* Set to size of any additional room needed at front - * of allocated Tx SKBs */ - u32 config; - - /* WEP and other encryption related settings at the device level */ - int open_wep; /* Set to 1 to allow unencrypted frames */ - - int reset_on_keychange; /* Set to 1 if the HW needs to be reset on - * WEP key changes */ - - /* If the host performs {en,de}cryption, then set to 1 */ - int host_encrypt; - int host_encrypt_msdu; - int host_decrypt; - /* host performs multicast decryption */ - int host_mc_decrypt; - - /* host should strip IV and ICV from protected frames */ - /* meaningful only when hardware decryption is being used */ - int host_strip_iv_icv; - - int host_open_frag; - int host_build_iv; - int ieee802_1x; /* is IEEE 802.1X used */ - - /* WPA data */ - int wpa_enabled; - int drop_unencrypted; - int privacy_invoked; - size_t wpa_ie_len; - u8 *wpa_ie; - - struct list_head crypt_deinit_list; - struct ieee80211_crypt_data *crypt[WEP_KEYS]; - int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */ - struct timer_list crypt_deinit_timer; - int crypt_quiesced; - - int bcrx_sta_key; /* use individual keys to override default keys even - * with RX of broad/multicast frames */ - - /* Fragmentation structures */ - struct ieee80211_frag_entry frag_cache[IEEE80211_FRAG_CACHE_LEN]; - unsigned int frag_next_idx; - u16 fts; /* Fragmentation Threshold */ - u16 rts; /* RTS threshold */ - - /* Association info */ - u8 bssid[ETH_ALEN]; - - enum ieee80211_state state; - - int mode; /* A, B, G */ - int modulation; /* CCK, OFDM */ - int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */ - int abg_true; /* ABG flag */ - - int perfect_rssi; - int worst_rssi; - - u16 prev_seq_ctl; /* used to drop duplicate frames */ - - /* Callback functions */ - void (*set_security) (struct net_device * dev, - struct ieee80211_security * sec); - int (*hard_start_xmit) (struct ieee80211_txb * txb, - struct net_device * dev, int pri); - int (*reset_port) (struct net_device * dev); - int (*is_queue_full) (struct net_device * dev, int pri); - - int (*handle_management) (struct net_device * dev, - struct ieee80211_network * network, u16 type); - int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb); - - /* Typical STA methods */ - int (*handle_auth) (struct net_device * dev, - struct ieee80211_auth * auth); - int (*handle_deauth) (struct net_device * dev, - struct ieee80211_deauth * auth); - int (*handle_action) (struct net_device * dev, - struct ieee80211_action * action, - struct ieee80211_rx_stats * stats); - int (*handle_disassoc) (struct net_device * dev, - struct ieee80211_disassoc * assoc); - int (*handle_beacon) (struct net_device * dev, - struct ieee80211_beacon * beacon, - struct ieee80211_network * network); - int (*handle_probe_response) (struct net_device * dev, - struct ieee80211_probe_response * resp, - struct ieee80211_network * network); - int (*handle_probe_request) (struct net_device * dev, - struct ieee80211_probe_request * req, - struct ieee80211_rx_stats * stats); - int (*handle_assoc_response) (struct net_device * dev, - struct ieee80211_assoc_response * resp, - struct ieee80211_network * network); - - /* Typical AP methods */ - int (*handle_assoc_request) (struct net_device * dev); - int (*handle_reassoc_request) (struct net_device * dev, - struct ieee80211_reassoc_request * req); - - /* This must be the last item so that it points to the data - * allocated beyond this structure by alloc_ieee80211 */ - u8 priv[0]; -}; - -#define IEEE_A (1<<0) -#define IEEE_B 
(1<<1) -#define IEEE_G (1<<2) -#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G) - -static inline void *ieee80211_priv(struct net_device *dev) -{ - return ((struct ieee80211_device *)netdev_priv(dev))->priv; -} - -static inline int ieee80211_is_empty_essid(const char *essid, int essid_len) -{ - /* Single white space is for Linksys APs */ - if (essid_len == 1 && essid[0] == ' ') - return 1; - - /* Otherwise, if the entire essid is 0, we assume it is hidden */ - while (essid_len) { - essid_len--; - if (essid[essid_len] != '\0') - return 0; - } - - return 1; -} - -static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, - int mode) -{ - /* - * It is possible for both access points and our device to support - * combinations of modes, so as long as there is one valid combination - * of ap/device supported modes, then return success - * - */ - if ((mode & IEEE_A) && - (ieee->modulation & IEEE80211_OFDM_MODULATION) && - (ieee->freq_band & IEEE80211_52GHZ_BAND)) - return 1; - - if ((mode & IEEE_G) && - (ieee->modulation & IEEE80211_OFDM_MODULATION) && - (ieee->freq_band & IEEE80211_24GHZ_BAND)) - return 1; - - if ((mode & IEEE_B) && - (ieee->modulation & IEEE80211_CCK_MODULATION) && - (ieee->freq_band & IEEE80211_24GHZ_BAND)) - return 1; - - return 0; -} - -static inline int ieee80211_get_hdrlen(u16 fc) -{ - int hdrlen = IEEE80211_3ADDR_LEN; - u16 stype = WLAN_FC_GET_STYPE(fc); - - switch (WLAN_FC_GET_TYPE(fc)) { - case IEEE80211_FTYPE_DATA: - if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) - hdrlen = IEEE80211_4ADDR_LEN; - if (stype & IEEE80211_STYPE_QOS_DATA) - hdrlen += 2; - break; - case IEEE80211_FTYPE_CTL: - switch (WLAN_FC_GET_STYPE(fc)) { - case IEEE80211_STYPE_CTS: - case IEEE80211_STYPE_ACK: - hdrlen = IEEE80211_1ADDR_LEN; - break; - default: - hdrlen = IEEE80211_2ADDR_LEN; - break; - } - break; - } - - return hdrlen; -} - -static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr) -{ - switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) { - case IEEE80211_1ADDR_LEN: - return ((struct ieee80211_hdr_1addr *)hdr)->payload; - case IEEE80211_2ADDR_LEN: - return ((struct ieee80211_hdr_2addr *)hdr)->payload; - case IEEE80211_3ADDR_LEN: - return ((struct ieee80211_hdr_3addr *)hdr)->payload; - case IEEE80211_4ADDR_LEN: - return ((struct ieee80211_hdr_4addr *)hdr)->payload; - } - return NULL; -} - -static inline int ieee80211_is_ofdm_rate(u8 rate) -{ - switch (rate & ~IEEE80211_BASIC_RATE_MASK) { - case IEEE80211_OFDM_RATE_6MB: - case IEEE80211_OFDM_RATE_9MB: - case IEEE80211_OFDM_RATE_12MB: - case IEEE80211_OFDM_RATE_18MB: - case IEEE80211_OFDM_RATE_24MB: - case IEEE80211_OFDM_RATE_36MB: - case IEEE80211_OFDM_RATE_48MB: - case IEEE80211_OFDM_RATE_54MB: - return 1; - } - return 0; -} - -static inline int ieee80211_is_cck_rate(u8 rate) -{ - switch (rate & ~IEEE80211_BASIC_RATE_MASK) { - case IEEE80211_CCK_RATE_1MB: - case IEEE80211_CCK_RATE_2MB: - case IEEE80211_CCK_RATE_5MB: - case IEEE80211_CCK_RATE_11MB: - return 1; - } - return 0; -} - -/* ieee80211.c */ -extern void free_ieee80211(struct net_device *dev); -extern struct net_device *alloc_ieee80211(int sizeof_priv); - -extern int ieee80211_set_encryption(struct ieee80211_device *ieee); - -/* ieee80211_tx.c */ -extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); -extern void ieee80211_txb_free(struct ieee80211_txb *); -extern int ieee80211_tx_frame(struct ieee80211_device *ieee, - struct ieee80211_hdr *frame, int hdr_len, - int total_len, int encrypt_mpdu); - -/* ieee80211_rx.c */ 
-extern void ieee80211_rx_any(struct ieee80211_device *ieee, - struct sk_buff *skb, struct ieee80211_rx_stats *stats); -extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, - struct ieee80211_rx_stats *rx_stats); -/* make sure to set stats->len */ -extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, - struct ieee80211_hdr_4addr *header, - struct ieee80211_rx_stats *stats); -extern void ieee80211_network_reset(struct ieee80211_network *network); - -/* ieee80211_geo.c */ -extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device - *ieee); -extern int ieee80211_set_geo(struct ieee80211_device *ieee, - const struct ieee80211_geo *geo); - -extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee, - u8 channel); -extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, - u8 channel); -extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); -extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee, - u8 channel); -extern const struct ieee80211_channel *ieee80211_get_channel(struct - ieee80211_device - *ieee, u8 channel); -extern u32 ieee80211_channel_to_freq(struct ieee80211_device * ieee, - u8 channel); - -/* ieee80211_wx.c */ -extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, - struct iw_request_info *info, - union iwreq_data *wrqu, char *key); -extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee, - struct iw_request_info *info, - union iwreq_data *wrqu, char *key); -extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee, - struct iw_request_info *info, - union iwreq_data *wrqu, char *key); -extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, - struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); -extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, - struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); -extern int ieee80211_wx_set_auth(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra); -extern int ieee80211_wx_get_auth(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra); - -static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) -{ - ieee->scans++; -} - -static inline int ieee80211_get_scans(struct ieee80211_device *ieee) -{ - return ieee->scans; -} - -#endif /* IEEE80211_H */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index dfd8bf66ce2..b0fd9476c53 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,7 +1,4 @@ -/* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.5 2005/01/22 20:12:05 sam Exp $ */ -/* $NetBSD: ieee80211_radiotap.h,v 1.11 2005/06/22 06:16:02 dyoung Exp $ */ - -/*- +/* * Copyright (c) 2003, 2004 David Young. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -42,8 +39,6 @@ #include <linux/kernel.h> #include <asm/unaligned.h> -/* Radiotap header version (from official NetBSD feed) */ -#define IEEE80211RADIOTAP_VERSION "1.5" /* Base version of the radiotap packet header data */ #define PKTHDR_RADIOTAP_VERSION 0 @@ -62,12 +57,8 @@ * readers. */ -/* XXX tcpdump/libpcap do not tolerate variable-length headers, - * yet, so we pad every radiotap header to 64 bytes. Ugh. - */ -#define IEEE80211_RADIOTAP_HDRLEN 64 - -/* The radio capture header precedes the 802.11 header. +/* + * The radio capture header precedes the 802.11 header. 
* All data in the header is little endian on all platforms. */ struct ieee80211_radiotap_header { @@ -89,7 +80,7 @@ struct ieee80211_radiotap_header { * Additional extensions are made * by setting bit 31. */ -}; +} __packed; /* Name Data type Units * ---- --------- ----- @@ -187,6 +178,18 @@ struct ieee80211_radiotap_header { * * Number of unicast retries a transmitted frame used. * + * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless + * + * Contains a bitmap of known fields/flags, the flags, and + * the MCS index. + * + * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless + * + * Contains the AMPDU information for the subframe. + * + * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 + * + * Contains VHT information about this frame. */ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_TSFT = 0, @@ -207,6 +210,14 @@ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_TX_FLAGS = 15, IEEE80211_RADIOTAP_RTS_RETRIES = 16, IEEE80211_RADIOTAP_DATA_RETRIES = 17, + + IEEE80211_RADIOTAP_MCS = 19, + IEEE80211_RADIOTAP_AMPDU_STATUS = 20, + IEEE80211_RADIOTAP_VHT = 21, + + /* valid in every it_present bitmap, even vendor namespaces */ + IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, IEEE80211_RADIOTAP_EXT = 31 }; @@ -219,6 +230,10 @@ enum ieee80211_radiotap_type { #define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */ #define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */ #define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */ +#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */ +#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */ +#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */ +#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */ /* For IEEE80211_RADIOTAP_FLAGS */ #define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received @@ -239,22 +254,72 @@ enum ieee80211_radiotap_type { * 802.11 header and payload * (to 32-bit boundary) */ +#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */ + /* For IEEE80211_RADIOTAP_RX_FLAGS */ -#define IEEE80211_RADIOTAP_F_RX_BADFCS 0x0001 /* frame failed crc check */ +#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */ /* For IEEE80211_RADIOTAP_TX_FLAGS */ #define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive * retries */ #define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */ #define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */ +#define IEEE80211_RADIOTAP_F_TX_NOACK 0x0008 /* don't expect an ack */ + + +/* For IEEE80211_RADIOTAP_MCS */ +#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01 +#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02 +#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04 +#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08 +#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10 +#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20 + +#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03 +#define IEEE80211_RADIOTAP_MCS_BW_20 0 +#define IEEE80211_RADIOTAP_MCS_BW_40 1 +#define IEEE80211_RADIOTAP_MCS_BW_20L 2 +#define IEEE80211_RADIOTAP_MCS_BW_20U 3 +#define IEEE80211_RADIOTAP_MCS_SGI 0x04 +#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 +#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 +#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 +#define IEEE80211_RADIOTAP_MCS_STBC_1 1 +#define IEEE80211_RADIOTAP_MCS_STBC_2 2 +#define IEEE80211_RADIOTAP_MCS_STBC_3 3 + +#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 + +/* For IEEE80211_RADIOTAP_AMPDU_STATUS */ +#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 +#define 
IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002 +#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004 +#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008 +#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010 +#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020 + +/* For IEEE80211_RADIOTAP_VHT */ +#define IEEE80211_RADIOTAP_VHT_KNOWN_STBC 0x0001 +#define IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA 0x0002 +#define IEEE80211_RADIOTAP_VHT_KNOWN_GI 0x0004 +#define IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS 0x0008 +#define IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM 0x0010 +#define IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED 0x0020 +#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH 0x0040 +#define IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID 0x0080 +#define IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID 0x0100 + +#define IEEE80211_RADIOTAP_VHT_FLAG_STBC 0x01 +#define IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA 0x02 +#define IEEE80211_RADIOTAP_VHT_FLAG_SGI 0x04 +#define IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 0x08 +#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM 0x10 +#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED 0x20 -/* Ugly macro to convert literal channel numbers into their mhz equivalents - * There are certianly some conditions that will break this (like feeding it '30') - * but they shouldn't arise since nothing talks on channel 30. */ -#define ieee80211chan2mhz(x) \ - (((x) <= 14) ? \ - (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ - ((x) + 1000) * 5) +#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01 +#define IEEE80211_RADIOTAP_CODING_LDPC_USER1 0x02 +#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04 +#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08 /* helpers */ static inline int ieee80211_get_radiotap_len(unsigned char *data) @@ -262,7 +327,7 @@ static inline int ieee80211_get_radiotap_len(unsigned char *data) struct ieee80211_radiotap_header *hdr = (struct ieee80211_radiotap_header *)data; - return le16_to_cpu(get_unaligned(&hdr->it_len)); + return get_unaligned_le16(&hdr->it_len); } #endif /* IEEE80211_RADIOTAP_H */ diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h deleted file mode 100644 index 1ef6282fdde..00000000000 --- a/include/net/ieee80211softmac.h +++ /dev/null @@ -1,373 +0,0 @@ -/* - * ieee80211softmac.h - public interface to the softmac - * - * Copyright (c) 2005 Johannes Berg <johannes@sipsolutions.net> - * Joseph Jezak <josejx@gentoo.org> - * Larry Finger <Larry.Finger@lwfinger.net> - * Danny van Dyk <kugelfang@gentoo.org> - * Michael Buesch <mbuesch@freenet.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. 
- */ - -#ifndef IEEE80211SOFTMAC_H_ -#define IEEE80211SOFTMAC_H_ - -#include <linux/kernel.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> -#include <linux/list.h> -#include <net/ieee80211.h> - -/* Once the API is considered more or less stable, - * this should be incremented on API incompatible changes. - */ -#define IEEE80211SOFTMAC_API 0 - -#define IEEE80211SOFTMAC_MAX_RATES_LEN 8 -#define IEEE80211SOFTMAC_MAX_EX_RATES_LEN 255 - -struct ieee80211softmac_ratesinfo { - u8 count; - u8 rates[IEEE80211SOFTMAC_MAX_RATES_LEN + IEEE80211SOFTMAC_MAX_EX_RATES_LEN]; -}; - -/* internal structures */ -struct ieee80211softmac_network; -struct ieee80211softmac_scaninfo; - -struct ieee80211softmac_essid { - u8 len; - char data[IW_ESSID_MAX_SIZE+1]; -}; - -struct ieee80211softmac_wpa { - char *IE; - int IElen; - int IEbuflen; -}; - -/* - * Information about association - */ -struct ieee80211softmac_assoc_info { - - struct mutex mutex; - - /* - * This is the requested ESSID. It is written - * only by the WX handlers. - * - */ - struct ieee80211softmac_essid req_essid; - /* - * the ESSID of the network we're currently - * associated (or trying) to. This is - * updated to the network's actual ESSID - * even if the requested ESSID was 'ANY' - */ - struct ieee80211softmac_essid associate_essid; - - /* BSSID we're trying to associate to */ - char bssid[ETH_ALEN]; - - /* some flags. - * static_essid is valid if the essid is constant, - * this is for use by the wx handlers only. - * - * associating is true, if the network has been - * auth'ed on and we are in the process of associating. - * - * bssvalid is true if we found a matching network - * and saved it's BSSID into the bssid above. - * - * bssfixed is used for SIOCSIWAP. - */ - u8 static_essid; - u8 short_preamble_available; - u8 associating; - u8 associated; - u8 assoc_wait; - u8 bssvalid; - u8 bssfixed; - - /* Scan retries remaining */ - int scan_retry; - - struct delayed_work work; - struct delayed_work timeout; -}; - -struct ieee80211softmac_bss_info { - /* Rates supported by the network */ - struct ieee80211softmac_ratesinfo supported_rates; - - /* This indicates whether frames can currently be transmitted with - * short preamble (only use this variable during TX at CCK rates) */ - u8 short_preamble:1; - - /* This indicates whether protection (e.g. self-CTS) should be used - * when transmitting with OFDM modulation */ - u8 use_protection:1; -}; - -enum { - IEEE80211SOFTMAC_AUTH_OPEN_REQUEST = 1, - IEEE80211SOFTMAC_AUTH_OPEN_RESPONSE = 2, -}; - -enum { - IEEE80211SOFTMAC_AUTH_SHARED_REQUEST = 1, - IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE = 2, - IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE = 3, - IEEE80211SOFTMAC_AUTH_SHARED_PASS = 4, -}; - -/* We should make these tunable - * AUTH_TIMEOUT seems really long, but that's what it is in BSD */ -#define IEEE80211SOFTMAC_AUTH_TIMEOUT (12 * HZ) -#define IEEE80211SOFTMAC_AUTH_RETRY_LIMIT 5 -#define IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT 3 - -struct ieee80211softmac_txrates { - /* The Bit-Rate to be used for multicast frames. */ - u8 mcast_rate; - - /* The Bit-Rate to be used for multicast management frames. */ - u8 mgt_mcast_rate; - - /* The Bit-Rate to be used for any other (normal) data packet. */ - u8 default_rate; - /* The Bit-Rate to be used for default fallback - * (If the device supports fallback and hardware-retry) - */ - u8 default_fallback; - - /* This is the rate that the user asked for */ - u8 user_rate; -}; - -/* Bits for txrates_change callback. 
*/ -#define IEEE80211SOFTMAC_TXRATECHG_DEFAULT (1 << 0) /* default_rate */ -#define IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK (1 << 1) /* default_fallback */ -#define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */ -#define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */ - -#define IEEE80211SOFTMAC_BSSINFOCHG_RATES (1 << 0) /* supported_rates */ -#define IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE (1 << 1) /* short_preamble */ -#define IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION (1 << 2) /* use_protection */ - -struct ieee80211softmac_device { - /* 802.11 structure for data stuff */ - struct ieee80211_device *ieee; - struct net_device *dev; - - /* only valid if associated, then holds the Association ID */ - u16 association_id; - - /* the following methods are callbacks that the driver - * using this framework has to assign - */ - - /* always assign these */ - void (*set_bssid_filter)(struct net_device *dev, const u8 *bssid); - void (*set_channel)(struct net_device *dev, u8 channel); - - /* assign if you need it, informational only */ - void (*link_change)(struct net_device *dev); - - /* If the hardware can do scanning, assign _all_ three of these callbacks. - * When the scan finishes, call ieee80211softmac_scan_finished(). - */ - - /* when called, start_scan is guaranteed to not be called again - * until you call ieee80211softmac_scan_finished. - * Return 0 if scanning could start, error otherwise. - * SOFTMAC AUTHORS: don't call this, use ieee80211softmac_start_scan */ - int (*start_scan)(struct net_device *dev); - /* this should block until after ieee80211softmac_scan_finished was called - * SOFTMAC AUTHORS: don't call this, use ieee80211softmac_wait_for_scan */ - void (*wait_for_scan)(struct net_device *dev); - /* stop_scan aborts a scan, but is asynchronous. - * if you want to wait for it too, use wait_for_scan - * SOFTMAC AUTHORS: don't call this, use ieee80211softmac_stop_scan */ - void (*stop_scan)(struct net_device *dev); - - /* we'll need something about beacons here too, for AP or ad-hoc modes */ - - /* Transmission rates to be used by the driver. - * The SoftMAC figures out the best possible rates. - * The driver just needs to read them. - */ - struct ieee80211softmac_txrates txrates; - - /* If the driver needs to do stuff on TX rate changes, assign this - * callback. See IEEE80211SOFTMAC_TXRATECHG for change flags. */ - void (*txrates_change)(struct net_device *dev, - u32 changes); - - /* If the driver needs to do stuff when BSS properties change, assign - * this callback. see IEEE80211SOFTMAC_BSSINFOCHG for change flags. */ - void (*bssinfo_change)(struct net_device *dev, - u32 changes); - - /* private stuff follows */ - /* this lock protects this structure */ - spinlock_t lock; - - struct workqueue_struct *wq; - - u8 running; /* SoftMAC started? 
*/ - u8 scanning; - - struct ieee80211softmac_scaninfo *scaninfo; - struct ieee80211softmac_assoc_info associnfo; - struct ieee80211softmac_bss_info bssinfo; - - struct list_head auth_queue; - struct list_head events; - - struct ieee80211softmac_ratesinfo ratesinfo; - int txrate_badness; - - /* WPA stuff */ - struct ieee80211softmac_wpa wpa; - - /* we need to keep a list of network structs we copied */ - struct list_head network_list; - - /* This must be the last item so that it points to the data - * allocated beyond this structure by alloc_ieee80211 */ - u8 priv[0]; -}; - -extern void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm); - -static inline void * ieee80211softmac_priv(struct net_device *dev) -{ - return ((struct ieee80211softmac_device *)ieee80211_priv(dev))->priv; -} - -extern struct net_device * alloc_ieee80211softmac(int sizeof_priv); -extern void free_ieee80211softmac(struct net_device *dev); - -/* Call this function if you detect a lost TX fragment. - * (If the device indicates failure of ACK RX, for example.) - * It is wise to call this function if you are able to detect lost packets, - * because it contributes to the TX Rates auto adjustment. - */ -extern void ieee80211softmac_fragment_lost(struct net_device *dev, - u16 wireless_sequence_number); -/* Call this function before _start to tell the softmac what rates - * the hw supports. The rates parameter is copied, so you can - * free it right after calling this function. - * Note that the rates need to be sorted. */ -extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates); - -/* Finds the highest rate which is: - * 1. Present in ri (optionally a basic rate) - * 2. Supported by the device - * 3. Less than or equal to the user-defined rate - */ -extern u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac, - struct ieee80211softmac_ratesinfo *ri, int basic_only); - -/* Helper function which advises you the rate at which a frame should be - * transmitted at. */ -static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac, - int is_multicast, - int is_mgt) -{ - struct ieee80211softmac_txrates *txrates = &mac->txrates; - - if (!mac->associnfo.associated) - return txrates->mgt_mcast_rate; - - /* We are associated, sending unicast frame */ - if (!is_multicast) - return txrates->default_rate; - - /* We are associated, sending multicast frame */ - if (is_mgt) - return txrates->mgt_mcast_rate; - else - return txrates->mcast_rate; -} - -/* Helper function which advises you when it is safe to transmit with short - * preamble. - * You should only call this function when transmitting at CCK rates. */ -static inline int ieee80211softmac_short_preamble_ok(struct ieee80211softmac_device *mac, - int is_multicast, - int is_mgt) -{ - return (is_multicast && is_mgt) ? 0 : mac->bssinfo.short_preamble; -} - -/* Helper function which advises you whether protection (e.g. self-CTS) is - * needed. 1 = protection needed, 0 = no protection needed - * Only use this function when transmitting with OFDM modulation. */ -static inline int ieee80211softmac_protection_needed(struct ieee80211softmac_device *mac) -{ - return mac->bssinfo.use_protection; -} - -/* Start the SoftMAC. Call this after you initialized the device - * and it is ready to run. - */ -extern void ieee80211softmac_start(struct net_device *dev); -/* Stop the SoftMAC. Call this before you shutdown the device. 
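The rate-selection helper being removed above encodes a small decision tree: the management/multicast rate before association, the unicast default rate once associated, and the two multicast rates otherwise. A standalone sketch of just that decision order, with the structures reduced to the fields the decision actually reads (names shortened for illustration), looks like this:

#include <stdbool.h>
#include <stdint.h>

/* Trimmed-down stand-ins for the removed softmac structures; only the
 * fields consulted by the rate decision are kept. */
struct txrates {
    uint8_t mcast_rate;      /* multicast data frames */
    uint8_t mgt_mcast_rate;  /* multicast management frames */
    uint8_t default_rate;    /* normal unicast data */
};

/* Mirrors the decision order of ieee80211softmac_suggest_txrate(). */
static uint8_t suggest_txrate(const struct txrates *r, bool associated,
                              bool is_multicast, bool is_mgt)
{
    if (!associated)         /* pre-association traffic uses the mgmt/mcast rate */
        return r->mgt_mcast_rate;
    if (!is_multicast)       /* associated, unicast data */
        return r->default_rate;
    return is_mgt ? r->mgt_mcast_rate : r->mcast_rate;
}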
*/ -extern void ieee80211softmac_stop(struct net_device *dev); - -/* - * Event system - */ - -/* valid event types */ -#define IEEE80211SOFTMAC_EVENT_ANY -1 /*private use only*/ -#define IEEE80211SOFTMAC_EVENT_SCAN_FINISHED 0 -#define IEEE80211SOFTMAC_EVENT_ASSOCIATED 1 -#define IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED 2 -#define IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT 3 -#define IEEE80211SOFTMAC_EVENT_AUTHENTICATED 4 -#define IEEE80211SOFTMAC_EVENT_AUTH_FAILED 5 -#define IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT 6 -#define IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND 7 -#define IEEE80211SOFTMAC_EVENT_DISASSOCIATED 8 -/* keep this updated! */ -#define IEEE80211SOFTMAC_EVENT_LAST 8 -/* - * If you want to be notified of certain events, you can call - * ieee80211softmac_notify[_atomic] with - * - event set to one of the constants below - * - fun set to a function pointer of the appropriate type - * - context set to the context data you want passed - * The return value is 0, or an error. - */ -typedef void (*notify_function_ptr)(struct net_device *dev, int event_type, void *context); - -#define ieee80211softmac_notify(dev, event, fun, context) ieee80211softmac_notify_gfp(dev, event, fun, context, GFP_KERNEL); -#define ieee80211softmac_notify_atomic(dev, event, fun, context) ieee80211softmac_notify_gfp(dev, event, fun, context, GFP_ATOMIC); - -extern int ieee80211softmac_notify_gfp(struct net_device *dev, - int event, notify_function_ptr fun, void *context, gfp_t gfp_mask); - -/* To clear pending work (for ifconfig down, etc.) */ -extern void -ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm); - -#endif /* IEEE80211SOFTMAC_H_ */ diff --git a/include/net/ieee80211softmac_wx.h b/include/net/ieee80211softmac_wx.h deleted file mode 100644 index 4ee3ad57283..00000000000 --- a/include/net/ieee80211softmac_wx.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * This file contains the prototypes for the wireless extension - * handlers that the softmac API provides. Include this file to - * use the wx handlers, you can assign these directly. - * - * Copyright (c) 2005 Johannes Berg <johannes@sipsolutions.net> - * Joseph Jezak <josejx@gentoo.org> - * Larry Finger <Larry.Finger@lwfinger.net> - * Danny van Dyk <kugelfang@gentoo.org> - * Michael Buesch <mbuesch@freenet.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. 
- */ - -#ifndef _IEEE80211SOFTMAC_WX_H -#define _IEEE80211SOFTMAC_WX_H - -#include <net/ieee80211softmac.h> -#include <net/iw_handler.h> - -extern int -ieee80211softmac_wx_trigger_scan(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_get_scan_results(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_set_essid(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_get_essid(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_set_rate(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_get_rate(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_get_wap(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_set_wap(struct net_device *net_dev, - struct iw_request_info *info, - union iwreq_data *data, - char *extra); - -extern int -ieee80211softmac_wx_set_genie(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra); - -extern int -ieee80211softmac_wx_get_genie(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra); -extern int -ieee80211softmac_wx_set_mlme(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra); -#endif /* _IEEE80211SOFTMAC_WX */ diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h new file mode 100644 index 00000000000..0aa7122e8f1 --- /dev/null +++ b/include/net/ieee802154.h @@ -0,0 +1,195 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007, 2008 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Written by: + * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Maxim Osipov <maxim.osipov@siemens.com> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#ifndef NET_IEEE802154_H +#define NET_IEEE802154_H + +#define IEEE802154_MTU 127 + +#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ +#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ +#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ +#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */ + +#define IEEE802154_FC_TYPE_SHIFT 0 +#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1) +#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT) +#define IEEE802154_FC_SET_TYPE(v, x) do { \ + v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \ + (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \ + } while (0) + +#define IEEE802154_FC_SECEN_SHIFT 3 +#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT) +#define IEEE802154_FC_FRPEND_SHIFT 4 +#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT) +#define IEEE802154_FC_ACK_REQ_SHIFT 5 +#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT) +#define IEEE802154_FC_INTRA_PAN_SHIFT 6 +#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT) + +#define IEEE802154_FC_SAMODE_SHIFT 14 +#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT) +#define IEEE802154_FC_DAMODE_SHIFT 10 +#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_FC_VERSION_SHIFT 12 +#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT) +#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT) + +#define IEEE802154_FC_SAMODE(x) \ + (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT) + +#define IEEE802154_FC_DAMODE(x) \ + (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_SCF_SECLEVEL_MASK 7 +#define IEEE802154_SCF_SECLEVEL_SHIFT 0 +#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK) +#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3 +#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT) +#define IEEE802154_SCF_KEY_ID_MODE(x) \ + ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT) + +#define IEEE802154_SCF_KEY_IMPLICIT 0 +#define IEEE802154_SCF_KEY_INDEX 1 +#define IEEE802154_SCF_KEY_SHORT_INDEX 2 +#define IEEE802154_SCF_KEY_HW_INDEX 3 + +#define IEEE802154_SCF_SECLEVEL_NONE 0 +#define IEEE802154_SCF_SECLEVEL_MIC32 1 +#define IEEE802154_SCF_SECLEVEL_MIC64 2 +#define IEEE802154_SCF_SECLEVEL_MIC128 3 +#define IEEE802154_SCF_SECLEVEL_ENC 4 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7 + +/* MAC footer size */ +#define IEEE802154_MFR_SIZE 2 /* 2 octets */ + +/* MAC's Command Frames Identifiers */ +#define IEEE802154_CMD_ASSOCIATION_REQ 0x01 +#define IEEE802154_CMD_ASSOCIATION_RESP 0x02 +#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03 +#define IEEE802154_CMD_DATA_REQ 0x04 +#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05 +#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06 +#define IEEE802154_CMD_BEACON_REQ 0x07 +#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08 +#define IEEE802154_CMD_GTS_REQ 0x09 + +/* + * The return values of MAC operations + */ +enum { + /* + * The requested operation was completed successfully. 
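The frame-control accessors defined above are easiest to read with a worked example. The sketch below copies a few of the mask/shift values verbatim, then builds and decodes a sample little-endian FC word; the sample value itself is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the definitions above. */
#define FC_TYPE_MASK     ((1 << 3) - 1)   /* bits 0-2: frame type */
#define FC_ACK_REQ       (1 << 5)         /* bit 5: ACK requested */
#define FC_INTRA_PAN     (1 << 6)         /* bit 6: PAN ID compressed */
#define FC_DAMODE_SHIFT  10               /* bits 10-11: dest addressing mode */
#define FC_SAMODE_SHIFT  14               /* bits 14-15: source addressing mode */

int main(void)
{
    /* Example only: data frame (type 0x1), ACK requested, intra-PAN,
     * short (mode 2) destination and source addresses. */
    uint16_t fc = 0x1 | FC_ACK_REQ | FC_INTRA_PAN |
                  (2 << FC_DAMODE_SHIFT) | (2 << FC_SAMODE_SHIFT);

    printf("type=%u ack=%u intra_pan=%u damode=%u samode=%u\n",
           fc & FC_TYPE_MASK,
           !!(fc & FC_ACK_REQ),
           !!(fc & FC_INTRA_PAN),
           (fc >> FC_DAMODE_SHIFT) & 3,
           (fc >> FC_SAMODE_SHIFT) & 3);
    return 0;
}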
+ * For a transmission request, this value indicates + * a successful transmission. + */ + IEEE802154_SUCCESS = 0x0, + + /* The beacon was lost following a synchronization request. */ + IEEE802154_BEACON_LOSS = 0xe0, + /* + * A transmission could not take place due to activity on the + * channel, i.e., the CSMA-CA mechanism has failed. + */ + IEEE802154_CHNL_ACCESS_FAIL = 0xe1, + /* The GTS request has been denied by the PAN coordinator. */ + IEEE802154_DENINED = 0xe2, + /* The attempt to disable the transceiver has failed. */ + IEEE802154_DISABLE_TRX_FAIL = 0xe3, + /* + * The received frame induces a failed security check according to + * the security suite. + */ + IEEE802154_FAILED_SECURITY_CHECK = 0xe4, + /* + * The frame resulting from secure processing has a length that is + * greater than aMACMaxFrameSize. + */ + IEEE802154_FRAME_TOO_LONG = 0xe5, + /* + * The requested GTS transmission failed because the specified GTS + * either did not have a transmit GTS direction or was not defined. + */ + IEEE802154_INVALID_GTS = 0xe6, + /* + * A request to purge an MSDU from the transaction queue was made using + * an MSDU handle that was not found in the transaction table. + */ + IEEE802154_INVALID_HANDLE = 0xe7, + /* A parameter in the primitive is out of the valid range.*/ + IEEE802154_INVALID_PARAMETER = 0xe8, + /* No acknowledgment was received after aMaxFrameRetries. */ + IEEE802154_NO_ACK = 0xe9, + /* A scan operation failed to find any network beacons.*/ + IEEE802154_NO_BEACON = 0xea, + /* No response data were available following a request. */ + IEEE802154_NO_DATA = 0xeb, + /* The operation failed because a short address was not allocated. */ + IEEE802154_NO_SHORT_ADDRESS = 0xec, + /* + * A receiver enable request was unsuccessful because it could not be + * completed within the CAP. + */ + IEEE802154_OUT_OF_CAP = 0xed, + /* + * A PAN identifier conflict has been detected and communicated to the + * PAN coordinator. + */ + IEEE802154_PANID_CONFLICT = 0xee, + /* A coordinator realignment command has been received. */ + IEEE802154_REALIGMENT = 0xef, + /* The transaction has expired and its information discarded. */ + IEEE802154_TRANSACTION_EXPIRED = 0xf0, + /* There is no capacity to store the transaction. */ + IEEE802154_TRANSACTION_OVERFLOW = 0xf1, + /* + * The transceiver was in the transmitter enabled state when the + * receiver was requested to be enabled. + */ + IEEE802154_TX_ACTIVE = 0xf2, + /* The appropriate key is not available in the ACL. */ + IEEE802154_UNAVAILABLE_KEY = 0xf3, + /* + * A SET/GET request was issued with the identifier of a PIB attribute + * that is not supported. + */ + IEEE802154_UNSUPPORTED_ATTR = 0xf4, + /* + * A request to perform a scan operation failed because the MLME was + * in the process of performing a previously initiated scan operation. + */ + IEEE802154_SCAN_IN_PROGRESS = 0xfc, +}; + + +#endif + + diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h new file mode 100644 index 00000000000..3b53c8e405e --- /dev/null +++ b/include/net/ieee802154_netdev.h @@ -0,0 +1,463 @@ +/* + * An interface between IEEE802.15.4 device and rest of the kernel. + * + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Maxim Osipov <maxim.osipov@siemens.com> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#ifndef IEEE802154_NETDEVICE_H +#define IEEE802154_NETDEVICE_H + +#include <net/ieee802154.h> +#include <net/af_ieee802154.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> + +struct ieee802154_sechdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 level:3, + key_id_mode:2, + reserved:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:3, + key_id_mode:2, + level:3; +#else +#error "Please fix <asm/byteorder.h>" +#endif + u8 key_id; + __le32 frame_counter; + union { + __le32 short_src; + __le64 extended_src; + }; +}; + +struct ieee802154_addr { + u8 mode; + __le16 pan_id; + union { + __le16 short_addr; + __le64 extended_addr; + }; +}; + +struct ieee802154_hdr_fc { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u16 type:3, + security_enabled:1, + frame_pending:1, + ack_request:1, + intra_pan:1, + reserved:3, + dest_addr_mode:2, + version:2, + source_addr_mode:2; +#elif defined(__BIG_ENDIAN_BITFIELD) + u16 reserved:1, + intra_pan:1, + ack_request:1, + frame_pending:1, + security_enabled:1, + type:3, + source_addr_mode:2, + version:2, + dest_addr_mode:2, + reserved2:2; +#else +#error "Please fix <asm/byteorder.h>" +#endif +}; + +struct ieee802154_hdr { + struct ieee802154_hdr_fc fc; + u8 seq; + struct ieee802154_addr source; + struct ieee802154_addr dest; + struct ieee802154_sechdr sec; +}; + +/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from + * the contents of hdr will be, and the actual value of those bits in + * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame + * version, if SECEN is set. 
+ */ +int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr); + +/* pulls the entire 802.15.4 header off of the skb, including the security + * header, and performs pan id decompression + */ +int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr); + +/* parses the frame control, sequence number of address fields in a given skb + * and stores them into hdr, performing pan id decompression and length checks + * to be suitable for use in header_ops.parse + */ +int ieee802154_hdr_peek_addrs(const struct sk_buff *skb, + struct ieee802154_hdr *hdr); + +/* parses the full 802.15.4 header a given skb and stores them into hdr, + * performing pan id decompression and length checks to be suitable for use in + * header_ops.parse + */ +int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr); + +int ieee802154_max_payload(const struct ieee802154_hdr *hdr); + +static inline int +ieee802154_sechdr_authtag_len(const struct ieee802154_sechdr *sec) +{ + switch (sec->level) { + case IEEE802154_SCF_SECLEVEL_MIC32: + case IEEE802154_SCF_SECLEVEL_ENC_MIC32: + return 4; + case IEEE802154_SCF_SECLEVEL_MIC64: + case IEEE802154_SCF_SECLEVEL_ENC_MIC64: + return 8; + case IEEE802154_SCF_SECLEVEL_MIC128: + case IEEE802154_SCF_SECLEVEL_ENC_MIC128: + return 16; + case IEEE802154_SCF_SECLEVEL_NONE: + case IEEE802154_SCF_SECLEVEL_ENC: + default: + return 0; + } +} + +static inline int ieee802154_hdr_length(struct sk_buff *skb) +{ + struct ieee802154_hdr hdr; + int len = ieee802154_hdr_pull(skb, &hdr); + + if (len > 0) + skb_push(skb, len); + + return len; +} + +static inline bool ieee802154_addr_equal(const struct ieee802154_addr *a1, + const struct ieee802154_addr *a2) +{ + if (a1->pan_id != a2->pan_id || a1->mode != a2->mode) + return false; + + if ((a1->mode == IEEE802154_ADDR_LONG && + a1->extended_addr != a2->extended_addr) || + (a1->mode == IEEE802154_ADDR_SHORT && + a1->short_addr != a2->short_addr)) + return false; + + return true; +} + +static inline __le64 ieee802154_devaddr_from_raw(const void *raw) +{ + u64 temp; + + memcpy(&temp, raw, IEEE802154_ADDR_LEN); + return (__force __le64)swab64(temp); +} + +static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr) +{ + u64 temp = swab64((__force u64)addr); + + memcpy(raw, &temp, IEEE802154_ADDR_LEN); +} + +static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a, + const struct ieee802154_addr_sa *sa) +{ + a->mode = sa->addr_type; + a->pan_id = cpu_to_le16(sa->pan_id); + + switch (a->mode) { + case IEEE802154_ADDR_SHORT: + a->short_addr = cpu_to_le16(sa->short_addr); + break; + case IEEE802154_ADDR_LONG: + a->extended_addr = ieee802154_devaddr_from_raw(sa->hwaddr); + break; + } +} + +static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa, + const struct ieee802154_addr *a) +{ + sa->addr_type = a->mode; + sa->pan_id = le16_to_cpu(a->pan_id); + + switch (a->mode) { + case IEEE802154_ADDR_SHORT: + sa->short_addr = le16_to_cpu(a->short_addr); + break; + case IEEE802154_ADDR_LONG: + ieee802154_devaddr_to_raw(sa->hwaddr, a->extended_addr); + break; + } +} + +/* + * A control block of skb passed between the ARPHRD_IEEE802154 device + * and other stack parts. 
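One subtlety in the helpers above: ieee802154_devaddr_from_raw() and ieee802154_devaddr_to_raw() byte-swap the extended address with swab64(), because the raw over-the-air byte order is the reverse of the __le64 value used internally. A small standalone sketch of that round trip, assuming IEEE802154_ADDR_LEN is 8 and using a made-up sample address:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ADDR_LEN 8   /* IEEE802154_ADDR_LEN */

/* Reverse the eight address bytes -- the net effect of the memcpy()+swab64()
 * pair in ieee802154_devaddr_from_raw()/ieee802154_devaddr_to_raw(). */
static void reverse8(uint8_t *dst, const uint8_t *src)
{
    for (int i = 0; i < ADDR_LEN; i++)
        dst[i] = src[ADDR_LEN - 1 - i];
}

int main(void)
{
    /* EUI-64 as it would appear in a raw frame buffer (sample value). */
    const uint8_t raw[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
    uint8_t internal[ADDR_LEN], back[ADDR_LEN];

    reverse8(internal, raw);   /* "from_raw": wire order -> internal order */
    reverse8(back, internal);  /* "to_raw": internal order -> wire order   */

    printf("round trip %s\n", memcmp(raw, back, ADDR_LEN) ? "failed" : "ok");
    return 0;
}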
+ */ +struct ieee802154_mac_cb { + u8 lqi; + u8 type; + bool ackreq; + bool secen; + bool secen_override; + u8 seclevel; + bool seclevel_override; + struct ieee802154_addr source; + struct ieee802154_addr dest; +}; + +static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb) +{ + return (struct ieee802154_mac_cb *)skb->cb; +} + +static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb)); + + memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb)); + return mac_cb(skb); +} + +#define IEEE802154_LLSEC_KEY_SIZE 16 + +struct ieee802154_llsec_key_id { + u8 mode; + u8 id; + union { + struct ieee802154_addr device_addr; + __le32 short_source; + __le64 extended_source; + }; +}; + +struct ieee802154_llsec_key { + u8 frame_types; + u32 cmd_frame_ids; + u8 key[IEEE802154_LLSEC_KEY_SIZE]; +}; + +struct ieee802154_llsec_key_entry { + struct list_head list; + + struct ieee802154_llsec_key_id id; + struct ieee802154_llsec_key *key; +}; + +struct ieee802154_llsec_device_key { + struct list_head list; + + struct ieee802154_llsec_key_id key_id; + u32 frame_counter; +}; + +enum { + IEEE802154_LLSEC_DEVKEY_IGNORE, + IEEE802154_LLSEC_DEVKEY_RESTRICT, + IEEE802154_LLSEC_DEVKEY_RECORD, + + __IEEE802154_LLSEC_DEVKEY_MAX, +}; + +struct ieee802154_llsec_device { + struct list_head list; + + __le16 pan_id; + __le16 short_addr; + __le64 hwaddr; + u32 frame_counter; + bool seclevel_exempt; + + u8 key_mode; + struct list_head keys; +}; + +struct ieee802154_llsec_seclevel { + struct list_head list; + + u8 frame_type; + u8 cmd_frame_id; + bool device_override; + u32 sec_levels; +}; + +struct ieee802154_llsec_params { + bool enabled; + + __be32 frame_counter; + u8 out_level; + struct ieee802154_llsec_key_id out_key; + + __le64 default_key_source; + + __le16 pan_id; + __le64 hwaddr; + __le64 coord_hwaddr; + __le16 coord_shortaddr; +}; + +struct ieee802154_llsec_table { + struct list_head keys; + struct list_head devices; + struct list_head security_levels; +}; + +#define IEEE802154_MAC_SCAN_ED 0 +#define IEEE802154_MAC_SCAN_ACTIVE 1 +#define IEEE802154_MAC_SCAN_PASSIVE 2 +#define IEEE802154_MAC_SCAN_ORPHAN 3 + +struct ieee802154_mac_params { + s8 transmit_power; + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + + bool lbt; + u8 cca_mode; + s32 cca_ed_level; +}; + +struct wpan_phy; + +enum { + IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0, + IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1, + IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2, + IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3, + IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4, + IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5, + IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6, + IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7, + IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8, +}; + +struct ieee802154_llsec_ops { + int (*get_params)(struct net_device *dev, + struct ieee802154_llsec_params *params); + int (*set_params)(struct net_device *dev, + const struct ieee802154_llsec_params *params, + int changed); + + int (*add_key)(struct net_device *dev, + const struct ieee802154_llsec_key_id *id, + const struct ieee802154_llsec_key *key); + int (*del_key)(struct net_device *dev, + const struct ieee802154_llsec_key_id *id); + + int (*add_dev)(struct net_device *dev, + const struct ieee802154_llsec_device *llsec_dev); + int (*del_dev)(struct net_device *dev, __le64 dev_addr); + + int (*add_devkey)(struct net_device *dev, + __le64 device_addr, + const struct ieee802154_llsec_device_key *key); + int 
(*del_devkey)(struct net_device *dev, + __le64 device_addr, + const struct ieee802154_llsec_device_key *key); + + int (*add_seclevel)(struct net_device *dev, + const struct ieee802154_llsec_seclevel *sl); + int (*del_seclevel)(struct net_device *dev, + const struct ieee802154_llsec_seclevel *sl); + + void (*lock_table)(struct net_device *dev); + void (*get_table)(struct net_device *dev, + struct ieee802154_llsec_table **t); + void (*unlock_table)(struct net_device *dev); +}; +/* + * This should be located at net_device->ml_priv + * + * get_phy should increment the reference counting on returned phy. + * Use wpan_wpy_put to put that reference. + */ +struct ieee802154_mlme_ops { + /* The following fields are optional (can be NULL). */ + + int (*assoc_req)(struct net_device *dev, + struct ieee802154_addr *addr, + u8 channel, u8 page, u8 cap); + int (*assoc_resp)(struct net_device *dev, + struct ieee802154_addr *addr, + __le16 short_addr, u8 status); + int (*disassoc_req)(struct net_device *dev, + struct ieee802154_addr *addr, + u8 reason); + int (*start_req)(struct net_device *dev, + struct ieee802154_addr *addr, + u8 channel, u8 page, u8 bcn_ord, u8 sf_ord, + u8 pan_coord, u8 blx, u8 coord_realign); + int (*scan_req)(struct net_device *dev, + u8 type, u32 channels, u8 page, u8 duration); + + int (*set_mac_params)(struct net_device *dev, + const struct ieee802154_mac_params *params); + void (*get_mac_params)(struct net_device *dev, + struct ieee802154_mac_params *params); + + struct ieee802154_llsec_ops *llsec; + + /* The fields below are required. */ + + struct wpan_phy *(*get_phy)(const struct net_device *dev); + + /* + * FIXME: these should become the part of PIB/MIB interface. + * However we still don't have IB interface of any kind + */ + __le16 (*get_pan_id)(const struct net_device *dev); + __le16 (*get_short_addr)(const struct net_device *dev); + u8 (*get_dsn)(const struct net_device *dev); +}; + +/* The IEEE 802.15.4 standard defines 2 type of the devices: + * - FFD - full functionality device + * - RFD - reduce functionality device + * + * So 2 sets of mlme operations are needed + */ +struct ieee802154_reduced_mlme_ops { + struct wpan_phy *(*get_phy)(const struct net_device *dev); +}; + +static inline struct ieee802154_mlme_ops * +ieee802154_mlme_ops(const struct net_device *dev) +{ + return dev->ml_priv; +} + +static inline struct ieee802154_reduced_mlme_ops * +ieee802154_reduced_mlme_ops(const struct net_device *dev) +{ + return dev->ml_priv; +} + +#endif diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index b2cfc492725..b4956a5fcc3 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -30,44 +30,55 @@ #define IF_PREFIX_ONLINK 0x01 #define IF_PREFIX_AUTOCONF 0x02 -#ifdef __KERNEL__ +enum { + INET6_IFADDR_STATE_PREDAD, + INET6_IFADDR_STATE_DAD, + INET6_IFADDR_STATE_POSTDAD, + INET6_IFADDR_STATE_ERRDAD, + INET6_IFADDR_STATE_UP, + INET6_IFADDR_STATE_DEAD, +}; -struct inet6_ifaddr -{ +struct inet6_ifaddr { struct in6_addr addr; __u32 prefix_len; + /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. 
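The lifetime comment just above is worth spelling out: valid_lft and prefered_lft are stored in seconds relative to tstamp (a jiffies value), so an expiry test scales by HZ. A minimal userspace sketch of that arithmetic follows; the names, the fixed HZ and the absence of any INFINITY_LIFE_TIME special case are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>

#define HZ 1000   /* placeholder; the kernel value is configuration dependent */

/* Expiry is at tstamp + HZ * valid_lft, as the comment above states.
 * The signed subtraction is the usual wraparound-safe jiffies compare
 * (what time_after_eq() does in the kernel). */
static bool addr_valid_expired(unsigned long now, unsigned long tstamp,
                               uint32_t valid_lft)
{
    return (long)(now - (tstamp + (unsigned long)valid_lft * HZ)) >= 0;
}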
*/ __u32 valid_lft; __u32 prefered_lft; - unsigned long cstamp; /* created timestamp */ - unsigned long tstamp; /* updated timestamp */ atomic_t refcnt; spinlock_t lock; + spinlock_t state_lock; + + int state; - __u8 probes; - __u8 flags; + __u32 flags; + __u8 dad_probes; __u16 scope; - struct timer_list timer; + unsigned long cstamp; /* created timestamp */ + unsigned long tstamp; /* updated timestamp */ + + struct delayed_work dad_work; struct inet6_dev *idev; struct rt6_info *rt; - struct inet6_ifaddr *lst_next; /* next addr in addr_lst */ - struct inet6_ifaddr *if_next; /* next addr in inet6_dev */ + struct hlist_node addr_lst; + struct list_head if_list; -#ifdef CONFIG_IPV6_PRIVACY - struct inet6_ifaddr *tmp_next; /* next addr in tempaddr_lst */ + struct list_head tmp_list; struct inet6_ifaddr *ifpub; int regen_count; -#endif - int dead; + bool tokenized; + + struct rcu_head rcu; + struct in6_addr peer_addr; }; -struct ip6_sf_socklist -{ +struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct in6_addr sl_addr[0]; @@ -78,18 +89,17 @@ struct ip6_sf_socklist #define IP6_SFBLOCK 10 /* allocate this many at once */ -struct ipv6_mc_socklist -{ +struct ipv6_mc_socklist { struct in6_addr addr; int ifindex; - struct ipv6_mc_socklist *next; + struct ipv6_mc_socklist __rcu *next; rwlock_t sflock; unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ip6_sf_socklist *sflist; + struct rcu_head rcu; }; -struct ip6_sf_list -{ +struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2]; /* include/exclude counts */ @@ -104,8 +114,7 @@ struct ip6_sf_list #define MAF_NOREPORT 0x08 #define MAF_GSQUERY 0x10 -struct ifmcaddr6 -{ +struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; @@ -115,7 +124,7 @@ struct ifmcaddr6 unsigned char mca_crcount; unsigned long mca_sfcount[2]; struct timer_list mca_timer; - unsigned mca_flags; + unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; @@ -125,15 +134,13 @@ struct ifmcaddr6 /* Anycast stuff */ -struct ipv6_ac_socklist -{ +struct ipv6_ac_socklist { struct in6_addr acl_addr; int acl_ifindex; struct ipv6_ac_socklist *acl_next; }; -struct ifacaddr6 -{ +struct ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; struct rt6_info *aca_rt; @@ -148,31 +155,36 @@ struct ifacaddr6 #define IFA_HOST IPV6_ADDR_LOOPBACK #define IFA_LINK IPV6_ADDR_LINKLOCAL #define IFA_SITE IPV6_ADDR_SITELOCAL -#define IFA_GLOBAL 0x0000U struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; DEFINE_SNMP_STAT(struct ipstats_mib, ipv6); - DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6); - DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg); + DEFINE_SNMP_STAT_ATOMIC(struct icmpv6_mib_device, icmpv6dev); + DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib_device, icmpv6msgdev); }; -struct inet6_dev -{ - struct net_device *dev; +struct inet6_dev { + struct net_device *dev; - struct inet6_ifaddr *addr_list; + struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; - rwlock_t mc_lock; - unsigned char mc_qrv; + spinlock_t mc_lock; + + unsigned char mc_qrv; /* Query Robustness Variable */ unsigned char mc_gq_running; unsigned char mc_ifc_count; - unsigned long mc_v1_seen; + unsigned char mc_dad_count; + + unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */ + unsigned long mc_qi; /* Query Interval */ + unsigned long mc_qri; /* Query Response Interval */ unsigned long mc_maxdelay; + struct timer_list mc_gq_timer; /* general 
query timer */ struct timer_list mc_ifc_timer; /* interface change timer */ + struct timer_list mc_dad_timer; /* dad complete mc timer */ struct ifacaddr6 *ac_list; rwlock_t lock; @@ -180,23 +192,24 @@ struct inet6_dev __u32 if_flags; int dead; -#ifdef CONFIG_IPV6_PRIVACY u8 rndid[8]; struct timer_list regen_timer; - struct inet6_ifaddr *tempaddr_list; -#endif + struct list_head tempaddr_list; + + struct in6_addr token; struct neigh_parms *nd_parms; - struct inet6_dev *next; struct ipv6_devconf cnf; struct ipv6_devstat stats; + + struct timer_list rs_timer; + __u8 rs_probes; + unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; }; -extern struct ipv6_devconf ipv6_devconf; - -static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) +static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) { /* * +-------+-------+-------+-------+-------+-------+ @@ -210,60 +223,6 @@ static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); } -static inline void ipv6_tr_mc_map(struct in6_addr *addr, char *buf) -{ - /* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */ - - if (((addr->s6_addr[0] == 0xFF) && - ((addr->s6_addr[1] == 0x01) || (addr->s6_addr[1] == 0x02)) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr32[2] == 0) && - (addr->s6_addr16[6] == 0) && - (addr->s6_addr[15] == 1)) || - ((addr->s6_addr[0] == 0xFF) && - (addr->s6_addr[1] == 0x02) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr16[4] == 0) && - (addr->s6_addr[10] == 0) && - (addr->s6_addr[11] == 1) && - (addr->s6_addr[12] == 0xff))) - { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x01; - buf[3]=0x00; - buf[4]=0x00; - buf[5]=0x00; - /* All routers FF0x::2 */ - } else if ((addr->s6_addr[0] ==0xff) && - ((addr->s6_addr[1] & 0xF0) == 0) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr32[2] == 0) && - (addr->s6_addr16[6] == 0) && - (addr->s6_addr[15] == 2)) - { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x02; - buf[3]=0x00; - buf[4]=0x00; - buf[5]=0x00; - } else { - unsigned char i ; - - i = addr->s6_addr[15] & 7 ; - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x00; - buf[3]=0x01 << i ; - buf[4]=0x00; - buf[5]=0x00; - } -} - static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf) { buf[0] = 0x00; @@ -286,5 +245,20 @@ static inline void ipv6_ib_mc_map(const struct in6_addr *addr, buf[9] = broadcast[9]; memcpy(buf + 10, addr->s6_addr + 6, 10); } -#endif + +static inline int ipv6_ipgre_mc_map(const struct in6_addr *addr, + const unsigned char *broadcast, char *buf) +{ + if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) { + memcpy(buf, broadcast, 4); + } else { + /* v4mapped? 
*/ + if ((addr->s6_addr32[0] | addr->s6_addr32[1] | + (addr->s6_addr32[2] ^ htonl(0x0000ffff))) != 0) + return -EINVAL; + memcpy(buf, &addr->s6_addr32[3], 4); + } + return 0; +} + #endif diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index f13ddc2543b..74af137304b 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -22,21 +22,25 @@ struct sk_buff; struct sock; struct sockaddr; -extern int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb); +int inet6_csk_bind_conflict(const struct sock *sk, + const struct inet_bind_bucket *tb, bool relax); -extern struct request_sock *inet6_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, - const __be16 rport, - const struct in6_addr *raddr, - const struct in6_addr *laddr, - const int iif); +struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, + const struct request_sock *req); -extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk, - struct request_sock *req, - const unsigned long timeout); +struct request_sock *inet6_csk_search_req(const struct sock *sk, + struct request_sock ***prevp, + const __be16 rport, + const struct in6_addr *raddr, + const struct in6_addr *laddr, + const int iif); -extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); +void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, + const unsigned long timeout); -extern int inet6_csk_xmit(struct sk_buff *skb, int ipfragok); +void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); + +int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); + +struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu); #endif /* _INET6_CONNECTION_SOCK_H */ diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 62a5b691858..ae061354430 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -15,7 +15,7 @@ #define _INET6_HASHTABLES_H -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/types.h> @@ -24,32 +24,21 @@ #include <net/inet_sock.h> #include <net/ipv6.h> +#include <net/netns/hash.h> struct inet_hashinfo; -/* I have no idea if this is a good hash for v6 or not. 
-DaveM */ -static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, - const struct in6_addr *faddr, const __be16 fport) +static inline unsigned int __inet6_ehashfn(const u32 lhash, + const u16 lport, + const u32 fhash, + const __be16 fport, + const u32 initval) { - u32 ports = (lport ^ (__force u16)fport); - - return jhash_3words((__force u32)laddr->s6_addr32[3], - (__force u32)faddr->s6_addr32[3], - ports, inet_ehash_secret); -} - -static inline int inet6_sk_ehashfn(const struct sock *sk) -{ - const struct inet_sock *inet = inet_sk(sk); - const struct ipv6_pinfo *np = inet6_sk(sk); - const struct in6_addr *laddr = &np->rcv_saddr; - const struct in6_addr *faddr = &np->daddr; - const __u16 lport = inet->num; - const __be16 fport = inet->dport; - return inet6_ehashfn(laddr, lport, faddr, fport); + const u32 ports = (((u32)lport) << 16) | (__force u32)fport; + return jhash_3words(lhash, fhash, ports, initval); } -extern void __inet6_hash(struct sock *sk); +int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp); /* * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so @@ -57,19 +46,19 @@ extern void __inet6_hash(struct sock *sk); * * The sockhash lock must be held as a reader here. */ -extern struct sock *__inet6_lookup_established(struct net *net, - struct inet_hashinfo *hashinfo, - const struct in6_addr *saddr, - const __be16 sport, - const struct in6_addr *daddr, - const u16 hnum, - const int dif); - -extern struct sock *inet6_lookup_listener(struct net *net, - struct inet_hashinfo *hashinfo, - const struct in6_addr *daddr, - const unsigned short hnum, - const int dif); +struct sock *__inet6_lookup_established(struct net *net, + struct inet_hashinfo *hashinfo, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 hnum, const int dif); + +struct sock *inet6_lookup_listener(struct net *net, + struct inet_hashinfo *hashinfo, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const unsigned short hnum, const int dif); static inline struct sock *__inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, @@ -84,12 +73,29 @@ static inline struct sock *__inet6_lookup(struct net *net, if (sk) return sk; - return inet6_lookup_listener(net, hashinfo, daddr, hnum, dif); + return inet6_lookup_listener(net, hashinfo, saddr, sport, + daddr, hnum, dif); +} + +static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, + struct sk_buff *skb, + const __be16 sport, + const __be16 dport) +{ + struct sock *sk = skb_steal_sock(skb); + + if (sk) + return sk; + + return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, + &ipv6_hdr(skb)->saddr, sport, + &ipv6_hdr(skb)->daddr, ntohs(dport), + inet6_iif(skb)); } -extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, - const struct in6_addr *saddr, const __be16 sport, - const struct in6_addr *daddr, const __be16 dport, - const int dif); -#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */ +struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, + const struct in6_addr *saddr, const __be16 sport, + const struct in6_addr *daddr, const __be16 dport, + const int dif); +#endif /* IS_ENABLED(CONFIG_IPV6) */ #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 38d5a1e9980..fe7994c48b7 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -1,8 +1,8 @@ #ifndef _INET_COMMON_H #define 
_INET_COMMON_H -extern const struct proto_ops inet_stream_ops; -extern const struct proto_ops inet_dgram_ops; +extern const struct proto_ops inet_stream_ops; +extern const struct proto_ops inet_dgram_ops; /* * INET4 prototypes used by INET6 @@ -13,32 +13,34 @@ struct sock; struct sockaddr; struct socket; -extern int inet_release(struct socket *sock); -extern int inet_stream_connect(struct socket *sock, - struct sockaddr * uaddr, - int addr_len, int flags); -extern int inet_dgram_connect(struct socket *sock, - struct sockaddr * uaddr, - int addr_len, int flags); -extern int inet_accept(struct socket *sock, - struct socket *newsock, int flags); -extern int inet_sendmsg(struct kiocb *iocb, - struct socket *sock, - struct msghdr *msg, - size_t size); -extern int inet_shutdown(struct socket *sock, int how); -extern int inet_listen(struct socket *sock, int backlog); - -extern void inet_sock_destruct(struct sock *sk); - -extern int inet_bind(struct socket *sock, - struct sockaddr *uaddr, int addr_len); -extern int inet_getname(struct socket *sock, - struct sockaddr *uaddr, - int *uaddr_len, int peer); -extern int inet_ioctl(struct socket *sock, - unsigned int cmd, unsigned long arg); +int inet_release(struct socket *sock); +int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, + int addr_len, int flags); +int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, + int addr_len, int flags); +int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, + int addr_len, int flags); +int inet_accept(struct socket *sock, struct socket *newsock, int flags); +int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, + size_t size); +ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags); +int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, + size_t size, int flags); +int inet_shutdown(struct socket *sock, int how); +int inet_listen(struct socket *sock, int backlog); +void inet_sock_destruct(struct sock *sk); +int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, + int peer); +int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +int inet_ctl_sock_create(struct sock **sk, unsigned short family, + unsigned short type, unsigned char protocol, + struct net *net); + +static inline void inet_ctl_sock_destroy(struct sock *sk) +{ + sk_release_kernel(sk); +} #endif - - diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index f00f0573627..7a431388756 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -36,30 +36,32 @@ struct tcp_congestion_ops; * (i.e. 
things that depend on the address family) */ struct inet_connection_sock_af_ops { - int (*queue_xmit)(struct sk_buff *skb, int ipfragok); - void (*send_check)(struct sock *sk, int len, - struct sk_buff *skb); + int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl); + void (*send_check)(struct sock *sk, struct sk_buff *skb); int (*rebuild_header)(struct sock *sk); + void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst); - int (*remember_stamp)(struct sock *sk); u16 net_header_len; + u16 net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); +#ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); int (*compat_getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); +#endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); int (*bind_conflict)(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); }; /** inet_connection_sock - INET connection oriented sock @@ -124,13 +126,15 @@ struct inet_connection_sock { int probe_size; } icsk_mtup; u32 icsk_ca_priv[16]; + u32 icsk_user_timeout; #define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ #define ICSK_TIME_DACK 2 /* Delayed ack timer */ #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ -#define ICSK_TIME_KEEPOPEN 4 /* Keepalive timer */ +#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */ +#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ static inline struct inet_connection_sock *inet_csk(const struct sock *sk) { @@ -142,9 +146,9 @@ static inline void *inet_csk_ca(const struct sock *sk) return (void *)inet_csk(sk)->icsk_ca_priv; } -extern struct sock *inet_csk_clone(struct sock *sk, - const struct request_sock *req, - const gfp_t priority); +struct sock *inet_csk_clone_lock(const struct sock *sk, + const struct request_sock *req, + const gfp_t priority); enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, @@ -153,11 +157,11 @@ enum inet_csk_ack_state_t { ICSK_ACK_PUSHED2 = 8 }; -extern void inet_csk_init_xmit_timers(struct sock *sk, - void (*retransmit_handler)(unsigned long), - void (*delack_handler)(unsigned long), - void (*keepalive_handler)(unsigned long)); -extern void inet_csk_clear_xmit_timers(struct sock *sk); +void inet_csk_init_xmit_timers(struct sock *sk, + void (*retransmit_handler)(unsigned long), + void (*delack_handler)(unsigned long), + void (*keepalive_handler)(unsigned long)); +void inet_csk_clear_xmit_timers(struct sock *sk); static inline void inet_csk_schedule_ack(struct sock *sk) { @@ -174,8 +178,8 @@ static inline void inet_csk_delack_init(struct sock *sk) memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); } -extern void inet_csk_delete_keepalive_timer(struct sock *sk); -extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); +void inet_csk_delete_keepalive_timer(struct sock *sk); +void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); #ifdef 
INET_CSK_DEBUG extern const char inet_csk_timer_bug_msg[]; @@ -220,7 +224,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, when = max_when; } - if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { + if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || + what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) { icsk->icsk_pending = what; icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); @@ -236,18 +241,20 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, #endif } -extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); +struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); -extern struct request_sock *inet_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, - const __be16 rport, - const __be32 raddr, - const __be32 laddr); -extern int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb); -extern int inet_csk_get_port(struct sock *sk, unsigned short snum); +struct request_sock *inet_csk_search_req(const struct sock *sk, + struct request_sock ***prevp, + const __be16 rport, + const __be32 raddr, + const __be32 laddr); +int inet_csk_bind_conflict(const struct sock *sk, + const struct inet_bind_bucket *tb, bool relax); +int inet_csk_get_port(struct sock *sk, unsigned short snum); -extern struct dst_entry* inet_csk_route_req(struct sock *sk, +struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, + const struct request_sock *req); +struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, const struct request_sock *req); static inline void inet_csk_reqsk_queue_add(struct sock *sk, @@ -257,9 +264,8 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk, reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child); } -extern void inet_csk_reqsk_queue_hash_add(struct sock *sk, - struct request_sock *req, - unsigned long timeout); +void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, + unsigned long timeout); static inline void inet_csk_reqsk_queue_removed(struct sock *sk, struct request_sock *req) @@ -306,12 +312,13 @@ static inline void inet_csk_reqsk_queue_drop(struct sock *sk, reqsk_free(req); } -extern void inet_csk_reqsk_queue_prune(struct sock *parent, - const unsigned long interval, - const unsigned long timeout, - const unsigned long max_rto); +void inet_csk_reqsk_queue_prune(struct sock *parent, + const unsigned long interval, + const unsigned long timeout, + const unsigned long max_rto); -extern void inet_csk_destroy_sock(struct sock *sk); +void inet_csk_destroy_sock(struct sock *sk); +void inet_csk_prepare_forced_close(struct sock *sk); /* * LISTEN is a special case for poll.. 
@@ -322,18 +329,15 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk) (POLLIN | POLLRDNORM) : 0; } -extern int inet_csk_listen_start(struct sock *sk, const int nr_table_entries); -extern void inet_csk_listen_stop(struct sock *sk); +int inet_csk_listen_start(struct sock *sk, const int nr_table_entries); +void inet_csk_listen_stop(struct sock *sk); -extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); +void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); -extern int inet_csk_ctl_sock_create(struct socket **sock, - unsigned short family, - unsigned short type, - unsigned char protocol); +int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); -extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, int optlen); +struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index ba33db05385..84b20835b73 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -15,6 +15,8 @@ enum { INET_ECN_MASK = 3, }; +extern int sysctl_tunnel_ecn_log; + static inline int INET_ECN_is_ce(__u8 dsfield) { return (dsfield & INET_ECN_MASK) == INET_ECN_CE; @@ -27,9 +29,17 @@ static inline int INET_ECN_is_not_ect(__u8 dsfield) static inline int INET_ECN_is_capable(__u8 dsfield) { - return (dsfield & INET_ECN_ECT_0); + return dsfield & INET_ECN_ECT_0; } +/* + * RFC 3168 9.1.1 + * The full-functionality option for ECN encapsulation is to copy the + * ECN codepoint of the inside header to the outside header on + * encapsulation if the inside header is not-ECT or ECT, and to set the + * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of + * the inside header is CE. 
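The RFC 3168 rule quoted above reduces to a four-entry mapping. With the standard codepoints Not-ECT, ECT(1), ECT(0) and CE, the encapsulation helper in the next hunk yields:

    inner Not-ECT  ->  outer Not-ECT
    inner ECT(0)   ->  outer ECT(0)
    inner ECT(1)   ->  outer ECT(1)
    inner CE       ->  outer ECT(0)   (the outer header stays ECN-capable; the
                                       congestion mark is already recorded in
                                       the inner header)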
+ */ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) { outer &= ~INET_ECN_MASK; @@ -38,16 +48,26 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) return outer; } -#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) -#define INET_ECN_dontxmit(sk) \ - do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) +static inline void INET_ECN_xmit(struct sock *sk) +{ + inet_sk(sk)->tos |= INET_ECN_ECT_0; + if (inet6_sk(sk) != NULL) + inet6_sk(sk)->tclass |= INET_ECN_ECT_0; +} + +static inline void INET_ECN_dontxmit(struct sock *sk) +{ + inet_sk(sk)->tos &= ~INET_ECN_MASK; + if (inet6_sk(sk) != NULL) + inet6_sk(sk)->tclass &= ~INET_ECN_MASK; +} #define IP6_ECN_flow_init(label) do { \ (label) &= ~htonl(INET_ECN_MASK << 20); \ } while (0) #define IP6_ECN_flow_xmit(sk, label) do { \ - if (INET_ECN_is_capable(inet_sk(sk)->tos)) \ + if (INET_ECN_is_capable(inet6_sk(sk)->tclass)) \ (label) |= htonl(INET_ECN_ECT_0 << 20); \ } while (0) @@ -113,13 +133,15 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) static inline int INET_ECN_set_ce(struct sk_buff *skb) { switch (skb->protocol) { - case __constant_htons(ETH_P_IP): - if (skb->network_header + sizeof(struct iphdr) <= skb->tail) + case cpu_to_be16(ETH_P_IP): + if (skb_network_header(skb) + sizeof(struct iphdr) <= + skb_tail_pointer(skb)) return IP_ECN_set_ce(ip_hdr(skb)); break; - case __constant_htons(ETH_P_IPV6): - if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail) + case cpu_to_be16(ETH_P_IPV6): + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= + skb_tail_pointer(skb)) return IP6_ECN_set_ce(ipv6_hdr(skb)); break; } @@ -127,4 +149,78 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) return 0; } +/* + * RFC 6040 4.2 + * To decapsulate the inner header at the tunnel egress, a compliant + * tunnel egress MUST set the outgoing ECN field to the codepoint at the + * intersection of the appropriate arriving inner header (row) and outer + * header (column) in Figure 4 + * + * +---------+------------------------------------------------+ + * |Arriving | Arriving Outer Header | + * | Inner +---------+------------+------------+------------+ + * | Header | Not-ECT | ECT(0) | ECT(1) | CE | + * +---------+---------+------------+------------+------------+ + * | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)| + * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE | + * | ECT(1) | ECT(1) | ECT(1) (!) | ECT(1) | CE | + * | CE | CE | CE | CE(!!!)| CE | + * +---------+---------+------------+------------+------------+ + * + * Figure 4: New IP in IP Decapsulation Behaviour + * + * returns 0 on success + * 1 if something is broken and should be logged (!!! 
above) + * 2 if packet should be dropped + */ +static inline int INET_ECN_decapsulate(struct sk_buff *skb, + __u8 outer, __u8 inner) +{ + if (INET_ECN_is_not_ect(inner)) { + switch (outer & INET_ECN_MASK) { + case INET_ECN_NOT_ECT: + return 0; + case INET_ECN_ECT_0: + case INET_ECN_ECT_1: + return 1; + case INET_ECN_CE: + return 2; + } + } + + if (INET_ECN_is_ce(outer)) + INET_ECN_set_ce(skb); + + return 0; +} + +static inline int IP_ECN_decapsulate(const struct iphdr *oiph, + struct sk_buff *skb) +{ + __u8 inner; + + if (skb->protocol == htons(ETH_P_IP)) + inner = ip_hdr(skb)->tos; + else if (skb->protocol == htons(ETH_P_IPV6)) + inner = ipv6_get_dsfield(ipv6_hdr(skb)); + else + return 0; + + return INET_ECN_decapsulate(skb, oiph->tos, inner); +} + +static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h, + struct sk_buff *skb) +{ + __u8 inner; + + if (skb->protocol == htons(ETH_P_IP)) + inner = ip_hdr(skb)->tos; + else if (skb->protocol == htons(ETH_P_IPV6)) + inner = ipv6_get_dsfield(ipv6_hdr(skb)); + else + return 0; + + return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); +} #endif diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 7374251b978..6f59de98dab 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -1,10 +1,17 @@ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ +#include <linux/percpu_counter.h> + struct netns_frags { int nqueues; - atomic_t mem; struct list_head lru_list; + spinlock_t lru_lock; + + /* The percpu_counter "mem" need to be cacheline aligned. + * mem.count must not share cacheline with other writers + */ + struct percpu_counter mem ____cacheline_aligned_in_smp; /* sysctls */ int timeout; @@ -13,40 +20,63 @@ struct netns_frags { }; struct inet_frag_queue { - struct hlist_node list; - struct netns_frags *net; - struct list_head lru_list; /* lru list member */ spinlock_t lock; - atomic_t refcnt; struct timer_list timer; /* when will this queue expire? */ + struct list_head lru_list; /* lru list member */ + struct hlist_node list; + atomic_t refcnt; struct sk_buff *fragments; /* list of received fragments */ + struct sk_buff *fragments_tail; ktime_t stamp; int len; /* total length of orig datagram */ int meat; __u8 last_in; /* first/last segment arrived? */ -#define COMPLETE 4 -#define FIRST_IN 2 -#define LAST_IN 1 +#define INET_FRAG_COMPLETE 4 +#define INET_FRAG_FIRST_IN 2 +#define INET_FRAG_LAST_IN 1 + + u16 max_size; + + struct netns_frags *net; }; -#define INETFRAGS_HASHSZ 64 +#define INETFRAGS_HASHSZ 1024 + +/* averaged: + * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / + * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or + * struct frag_queue)) + */ +#define INETFRAGS_MAXDEPTH 128 + +struct inet_frag_bucket { + struct hlist_head chain; + spinlock_t chain_lock; +}; struct inet_frags { - struct hlist_head hash[INETFRAGS_HASHSZ]; - rwlock_t lock; - u32 rnd; - int qsize; + struct inet_frag_bucket hash[INETFRAGS_HASHSZ]; + /* This rwlock is a global lock (seperate per IPv4, IPv6 and + * netfilter). Important to keep this on a seperate cacheline. + * Its primarily a rebuild protection rwlock. + */ + rwlock_t lock ____cacheline_aligned_in_smp; int secret_interval; struct timer_list secret_timer; + /* The first call to hashfn is responsible to initialize + * rnd. This is best done with net_get_random_once. 
+ */ + u32 rnd; + int qsize; + unsigned int (*hashfn)(struct inet_frag_queue *); + bool (*match)(struct inet_frag_queue *q, void *arg); void (*constructor)(struct inet_frag_queue *q, void *arg); void (*destructor)(struct inet_frag_queue *); void (*skb_free)(struct sk_buff *); - int (*match)(struct inet_frag_queue *q, - void *arg); void (*frag_expire)(unsigned long data); }; @@ -59,9 +89,12 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f, int *work); -int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f); +int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force); struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, - struct inet_frags *f, void *key, unsigned int hash); + struct inet_frags *f, void *key, unsigned int hash) + __releases(&f->lock); +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, + const char *prefix); static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) { @@ -69,4 +102,80 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f inet_frag_destroy(q, f, NULL); } +/* Memory Tracking Functions. */ + +/* The default percpu_counter batch size is not big enough to scale to + * fragmentation mem acct sizes. + * The mem size of a 64K fragment is approx: + * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes + */ +static unsigned int frag_percpu_counter_batch = 130000; + +static inline int frag_mem_limit(struct netns_frags *nf) +{ + return percpu_counter_read(&nf->mem); +} + +static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) +{ + __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); +} + +static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) +{ + __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); +} + +static inline void init_frag_mem_limit(struct netns_frags *nf) +{ + percpu_counter_init(&nf->mem, 0); +} + +static inline int sum_frag_mem_limit(struct netns_frags *nf) +{ + int res; + + local_bh_disable(); + res = percpu_counter_sum_positive(&nf->mem); + local_bh_enable(); + + return res; +} + +static inline void inet_frag_lru_move(struct inet_frag_queue *q) +{ + spin_lock(&q->net->lru_lock); + if (!list_empty(&q->lru_list)) + list_move_tail(&q->lru_list, &q->net->lru_list); + spin_unlock(&q->net->lru_lock); +} + +static inline void inet_frag_lru_del(struct inet_frag_queue *q) +{ + spin_lock(&q->net->lru_lock); + list_del_init(&q->lru_list); + q->net->nqueues--; + spin_unlock(&q->net->lru_lock); +} + +static inline void inet_frag_lru_add(struct netns_frags *nf, + struct inet_frag_queue *q) +{ + spin_lock(&nf->lru_lock); + list_add_tail(&q->lru_list, &nf->lru_list); + q->net->nqueues++; + spin_unlock(&nf->lru_lock); +} + +/* RFC 3168 support : + * We want to check ECN values of all fragments, do detect invalid combinations. + * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value. 
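To make the OR-accumulation above concrete, a per-fragment helper can map the two ECN bits of the TOS byte straight onto the flag bits defined below (sketch only; my_frag_ecn and fq->ecn are hypothetical names, the real mapping lives in the IPv4/IPv6 reassembly code):

static inline u8 my_frag_ecn(u8 tos)
{
        /* INET_ECN_NOT_ECT..INET_ECN_CE are 0..3, so shifting yields exactly
         * one of the IPFRAG_ECN_* bits defined below.
         */
        return 1 << (tos & INET_ECN_MASK);
}

/* Per received fragment:  fq->ecn |= my_frag_ecn(ip_hdr(skb)->tos);
 * At reassembly time, ip_frag_ecn_table[fq->ecn] is assumed to give the
 * codepoint for the rebuilt header, with a reserved table value flagging
 * invalid mixes (e.g. Not-ECT together with CE) so the datagram is dropped.
 */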
+ */ +#define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */ +#define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */ +#define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */ +#define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */ + +extern const u8 ip_frag_ecn_table[16]; + #endif diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 97dc35ad09b..dd1950a7e27 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -16,6 +16,7 @@ #include <linux/interrupt.h> +#include <linux/ip.h> #include <linux/ipv6.h> #include <linux/list.h> #include <linux/slab.h> @@ -28,18 +29,19 @@ #include <net/inet_connection_sock.h> #include <net/inet_sock.h> #include <net/sock.h> +#include <net/route.h> #include <net/tcp_states.h> +#include <net/netns/hash.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/byteorder.h> /* This is for all connections with a full identity, no wildcards. - * One chain is dedicated to TIME_WAIT sockets. - * I'll experiment with dynamic table growth later. + * The 'e' prefix stands for Establish, but we really put all sockets + * but LISTEN ones. */ struct inet_ehash_bucket { - struct hlist_head chain; - struct hlist_head twchain; + struct hlist_nulls_head chain; }; /* There are a few simple rules, which allow for local port reuse by @@ -74,21 +76,43 @@ struct inet_ehash_bucket { * ports are created in O(1) time? I thought so. ;-) -DaveM */ struct inet_bind_bucket { +#ifdef CONFIG_NET_NS struct net *ib_net; +#endif unsigned short port; - signed short fastreuse; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + int num_owners; struct hlist_node node; struct hlist_head owners; }; -#define inet_bind_bucket_for_each(tb, node, head) \ - hlist_for_each_entry(tb, node, head, node) +static inline struct net *ib_net(struct inet_bind_bucket *ib) +{ + return read_pnet(&ib->ib_net); +} + +#define inet_bind_bucket_for_each(tb, head) \ + hlist_for_each_entry(tb, head, node) struct inet_bind_hashbucket { spinlock_t lock; struct hlist_head chain; }; +/* + * Sockets can be hashed in established or listening table + * We must use different 'nulls' end-of-chain value for listening + * hash table, or we might find a socket that was closed and + * reallocated/inserted into established hash table + */ +#define LISTENING_NULLS_BASE (1U << 29) +struct inet_listen_hashbucket { + spinlock_t lock; + struct hlist_nulls_head head; +}; + /* This is for listening sockets, thus all sockets which possess wildcards. */ #define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */ @@ -98,11 +122,10 @@ struct inet_hashinfo { * * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE * - * TIME_WAIT sockets use a separate chain (twchain). */ struct inet_ehash_bucket *ehash; - rwlock_t *ehash_locks; - unsigned int ehash_size; + spinlock_t *ehash_locks; + unsigned int ehash_mask; unsigned int ehash_locks_mask; /* Ok, let's try this, I give up, we do need a local binding @@ -111,34 +134,34 @@ struct inet_hashinfo { struct inet_bind_hashbucket *bhash; unsigned int bhash_size; - /* Note : 4 bytes padding on 64 bit arches */ + /* 4 bytes hole on 64 bit */ - /* All sockets in TCP_LISTEN state will be in here. This is the only - * table where wildcard'd TCP sockets can exist. Hash function here - * is just local port number. 
- */ - struct hlist_head listening_hash[INET_LHTABLE_SIZE]; + struct kmem_cache *bind_bucket_cachep; /* All the above members are written once at bootup and * never written again _or_ are predominantly read-access. * * Now align to a new cache line as all the following members - * are often dirty. + * might be often dirty. + */ + /* All sockets in TCP_LISTEN state will be in here. This is the only + * table where wildcard'd TCP sockets can exist. Hash function here + * is just local port number. */ - rwlock_t lhash_lock ____cacheline_aligned; - atomic_t lhash_users; - wait_queue_head_t lhash_wait; - struct kmem_cache *bind_bucket_cachep; + struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] + ____cacheline_aligned_in_smp; + + atomic_t bsockets; }; static inline struct inet_ehash_bucket *inet_ehash_bucket( struct inet_hashinfo *hashinfo, unsigned int hash) { - return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)]; + return &hashinfo->ehash[hash & hashinfo->ehash_mask]; } -static inline rwlock_t *inet_ehash_lockp( +static inline spinlock_t *inet_ehash_lockp( struct inet_hashinfo *hashinfo, unsigned int hash) { @@ -161,18 +184,18 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) size = 2048; if (nr_pcpus >= 32) size = 4096; - if (sizeof(rwlock_t) != 0) { + if (sizeof(spinlock_t) != 0) { #ifdef CONFIG_NUMA - if (size * sizeof(rwlock_t) > PAGE_SIZE) - hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t)); + if (size * sizeof(spinlock_t) > PAGE_SIZE) + hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); else #endif - hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t), + hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t), GFP_KERNEL); if (!hashinfo->ehash_locks) return ENOMEM; for (i = 0; i < size; i++) - rwlock_init(&hashinfo->ehash_locks[i]); + spin_lock_init(&hashinfo->ehash_locks[i]); } hashinfo->ehash_locks_mask = size - 1; return 0; @@ -183,7 +206,7 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) if (hashinfo->ehash_locks) { #ifdef CONFIG_NUMA unsigned int size = (hashinfo->ehash_locks_mask + 1) * - sizeof(rwlock_t); + sizeof(spinlock_t); if (size > PAGE_SIZE) vfree(hashinfo->ehash_locks); else @@ -193,93 +216,58 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) } } -extern struct inet_bind_bucket * - inet_bind_bucket_create(struct kmem_cache *cachep, - struct net *net, - struct inet_bind_hashbucket *head, - const unsigned short snum); -extern void inet_bind_bucket_destroy(struct kmem_cache *cachep, - struct inet_bind_bucket *tb); +struct inet_bind_bucket * +inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, + struct inet_bind_hashbucket *head, + const unsigned short snum); +void inet_bind_bucket_destroy(struct kmem_cache *cachep, + struct inet_bind_bucket *tb); -static inline int inet_bhashfn(const __u16 lport, const int bhash_size) +static inline int inet_bhashfn(struct net *net, const __u16 lport, + const int bhash_size) { - return lport & (bhash_size - 1); + return (lport + net_hash_mix(net)) & (bhash_size - 1); } -extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, - const unsigned short snum); +void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, + const unsigned short snum); /* These can have wildcards, don't try too hard. 
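The separate LISTENING_NULLS_BASE marker introduced above matters on the lockless lookup side: if the nulls value found at the end of a chain is not the one expected for that listening bucket, the walk strayed onto a socket that was freed and rehashed elsewhere, so it must restart. A rough sketch of that check (my_listener_walk is hypothetical; it uses inet_lhashfn() in its new two-argument form defined just below, and omits the scoring and refcounting the real lookup does):

static struct sock *my_listener_walk(struct net *net,
                                     struct inet_hashinfo *hashinfo,
                                     unsigned short hnum)
{
        unsigned int slot = inet_lhashfn(net, hnum);
        struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[slot];
        struct hlist_nulls_node *node;
        struct sock *sk;

begin:
        sk_nulls_for_each_rcu(sk, node, &ilb->head) {
                if (inet_sk(sk)->inet_num == hnum)
                        return sk;      /* real code also checks addr/dev and takes a ref */
        }
        /* Chain ended on an unexpected nulls marker: the last socket moved to
         * another (e.g. established) chain while we walked, so start over.
         */
        if (get_nulls_value(node) != slot + LISTENING_NULLS_BASE)
                goto begin;
        return NULL;
}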
*/ -static inline int inet_lhashfn(const unsigned short num) +static inline int inet_lhashfn(struct net *net, const unsigned short num) { - return num & (INET_LHTABLE_SIZE - 1); + return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1); } static inline int inet_sk_listen_hashfn(const struct sock *sk) { - return inet_lhashfn(inet_sk(sk)->num); + return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num); } /* Caller must disable local BH processing. */ -static inline void __inet_inherit_port(struct sock *sk, struct sock *child) -{ - struct inet_hashinfo *table = sk->sk_prot->hashinfo; - const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); - struct inet_bind_hashbucket *head = &table->bhash[bhash]; - struct inet_bind_bucket *tb; - - spin_lock(&head->lock); - tb = inet_csk(sk)->icsk_bind_hash; - sk_add_bind_node(child, &tb->owners); - inet_csk(child)->icsk_bind_hash = tb; - spin_unlock(&head->lock); -} +int __inet_inherit_port(struct sock *sk, struct sock *child); -static inline void inet_inherit_port(struct sock *sk, struct sock *child) -{ - local_bh_disable(); - __inet_inherit_port(sk, child); - local_bh_enable(); -} - -extern void inet_put_port(struct sock *sk); - -extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); - -/* - * - We may sleep inside this lock. - * - If sleeping is not required (or called from BH), - * use plain read_(un)lock(&inet_hashinfo.lhash_lock). - */ -static inline void inet_listen_lock(struct inet_hashinfo *hashinfo) -{ - /* read_lock synchronizes to candidates to writers */ - read_lock(&hashinfo->lhash_lock); - atomic_inc(&hashinfo->lhash_users); - read_unlock(&hashinfo->lhash_lock); -} +void inet_put_port(struct sock *sk); -static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo) -{ - if (atomic_dec_and_test(&hashinfo->lhash_users)) - wake_up(&hashinfo->lhash_wait); -} +void inet_hashinfo_init(struct inet_hashinfo *h); -extern void __inet_hash_nolisten(struct sock *sk); -extern void inet_hash(struct sock *sk); -extern void inet_unhash(struct sock *sk); +int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw); +void inet_hash(struct sock *sk); +void inet_unhash(struct sock *sk); -extern struct sock *__inet_lookup_listener(struct net *net, - struct inet_hashinfo *hashinfo, - const __be32 daddr, - const unsigned short hnum, - const int dif); +struct sock *__inet_lookup_listener(struct net *net, + struct inet_hashinfo *hashinfo, + const __be32 saddr, const __be16 sport, + const __be32 daddr, + const unsigned short hnum, + const int dif); static inline struct sock *inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { - return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif); + return __inet_lookup_listener(net, hashinfo, saddr, sport, + daddr, ntohs(dport), dif); } /* Socket demux engine toys. */ @@ -291,7 +279,6 @@ static inline struct sock *inet_lookup_listener(struct net *net, On 64bit targets we combine comparisons with pair of adjacent __be32 fields in the same way. 
*/ -typedef __u32 __bitwise __portpair; #ifdef __BIG_ENDIAN #define INET_COMBINED_PORTS(__sport, __dport) \ ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport))) @@ -301,42 +288,34 @@ typedef __u32 __bitwise __portpair; #endif #if (BITS_PER_LONG == 64) -typedef __u64 __bitwise __addrpair; #ifdef __BIG_ENDIAN #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ const __addrpair __name = (__force __addrpair) ( \ (((__force __u64)(__be32)(__saddr)) << 32) | \ - ((__force __u64)(__be32)(__daddr))); + ((__force __u64)(__be32)(__daddr))) #else /* __LITTLE_ENDIAN */ #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ const __addrpair __name = (__force __addrpair) ( \ (((__force __u64)(__be32)(__daddr)) << 32) | \ - ((__force __u64)(__be32)(__saddr))); + ((__force __u64)(__be32)(__saddr))) #endif /* __BIG_ENDIAN */ -#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ - (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ - ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ - ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ - (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) -#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ - (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ - ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ - ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ - (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) +#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_portpair == (__ports)) && \ + ((__sk)->sk_addrpair == (__cookie)) && \ + (!(__sk)->sk_bound_dev_if || \ + ((__sk)->sk_bound_dev_if == (__dif))) && \ + net_eq(sock_net(__sk), (__net))) #else /* 32-bit arch */ -#define INET_ADDR_COOKIE(__name, __saddr, __daddr) -#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \ - (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ - (inet_sk(__sk)->daddr == (__saddr)) && \ - (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ - ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ - (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) -#define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \ - (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ - (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ - (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ - ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ - (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) +#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ + const int __name __deprecated __attribute__((unused)) + +#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_portpair == (__ports)) && \ + ((__sk)->sk_daddr == (__saddr)) && \ + ((__sk)->sk_rcv_saddr == (__daddr)) && \ + (!(__sk)->sk_bound_dev_if || \ + ((__sk)->sk_bound_dev_if == (__dif))) && \ + net_eq(sock_net(__sk), (__net))) #endif /* 64-bit arch */ /* @@ -345,10 +324,11 @@ typedef __u64 __bitwise __addrpair; * * Local BH must be disabled here. 
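Putting the demux macros above together, the established-table lookup they are designed for looks roughly like this (sketch only; my_lookup_established is hypothetical, the hash is assumed to be computed by the caller, and RCU refcounting plus the nulls restart check are omitted):

static struct sock *my_lookup_established(struct net *net,
                                          struct inet_hashinfo *hashinfo,
                                          unsigned int hash,
                                          __be32 saddr, __be16 sport,
                                          __be32 daddr, u16 hnum, int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
        struct hlist_nulls_node *node;
        struct sock *sk;

        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (sk->sk_hash != hash)
                        continue;
                /* One 64-bit compare for addresses, one 32-bit for ports. */
                if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif))
                        return sk;      /* caller still has to take a reference */
        }
        return NULL;
}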
*/ -extern struct sock * __inet_lookup_established(struct net *net, - struct inet_hashinfo *hashinfo, - const __be32 saddr, const __be16 sport, - const __be32 daddr, const u16 hnum, const int dif); +struct sock *__inet_lookup_established(struct net *net, + struct inet_hashinfo *hashinfo, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const u16 hnum, + const int dif); static inline struct sock * inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, @@ -370,7 +350,8 @@ static inline struct sock *__inet_lookup(struct net *net, struct sock *sk = __inet_lookup_established(net, hashinfo, saddr, sport, daddr, hnum, dif); - return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif); + return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport, + daddr, hnum, dif); } static inline struct sock *inet_lookup(struct net *net, @@ -388,11 +369,30 @@ static inline struct sock *inet_lookup(struct net *net, return sk; } -extern int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, - int (*check_established)(struct inet_timewait_death_row *, - struct sock *, __u16, struct inet_timewait_sock **), - void (*hash)(struct sock *sk)); -extern int inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk); +static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, + struct sk_buff *skb, + const __be16 sport, + const __be16 dport) +{ + struct sock *sk = skb_steal_sock(skb); + const struct iphdr *iph = ip_hdr(skb); + + if (sk) + return sk; + else + return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, + iph->saddr, sport, + iph->daddr, dport, inet_iif(skb)); +} + +int __inet_hash_connect(struct inet_timewait_death_row *death_row, + struct sock *sk, u32 port_offset, + int (*check_established)(struct inet_timewait_death_row *, + struct sock *, __u16, + struct inet_timewait_sock **), + int (*hash)(struct sock *sk, + struct inet_timewait_sock *twp)); + +int inet_hash_connect(struct inet_timewait_death_row *death_row, + struct sock *sk); #endif /* _INET_HASHTABLES_H */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 89cd011edb9..b1edf17bec0 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -17,19 +17,21 @@ #define _INET_SOCK_H +#include <linux/kmemcheck.h> #include <linux/string.h> #include <linux/types.h> #include <linux/jhash.h> +#include <linux/netdevice.h> #include <net/flow.h> #include <net/sock.h> #include <net/request_sock.h> -#include <net/route.h> +#include <net/netns/hash.h> /** struct ip_options - IP Options * * @faddr - Saved first hop address - * @is_data - Options in __data, rather than skb + * @nexthop - Saved nexthop address in LSRR and SSRR * @is_strictroute - Strict source route * @srr_is_hit - Packet destination addr was our one * @is_changed - IP checksum more not valid @@ -39,12 +41,12 @@ */ struct ip_options { __be32 faddr; + __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; unsigned char ts; - unsigned char is_data:1, - is_strictroute:1, + unsigned char is_strictroute:1, srr_is_hit:1, is_changed:1, rr_needaddr:1, @@ -56,25 +58,39 @@ struct ip_options { unsigned char __data[0]; }; -#define optlength(opt) (sizeof(struct ip_options) + opt->optlen) +struct ip_options_rcu { + struct rcu_head rcu; + struct ip_options opt; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; struct inet_request_sock { struct request_sock req; -#if defined(CONFIG_IPV6) || 
defined(CONFIG_IPV6_MODULE) - u16 inet6_rsk_offset; - /* 2 bytes hole, try to pack */ -#endif - __be32 loc_addr; - __be32 rmt_addr; - __be16 rmt_port; - u16 snd_wscale : 4, - rcv_wscale : 4, +#define ir_loc_addr req.__req_common.skc_rcv_saddr +#define ir_rmt_addr req.__req_common.skc_daddr +#define ir_num req.__req_common.skc_num +#define ir_rmt_port req.__req_common.skc_dport +#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr +#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr +#define ir_iif req.__req_common.skc_bound_dev_if + + kmemcheck_bitfield_begin(flags); + u16 snd_wscale : 4, + rcv_wscale : 4, tstamp_ok : 1, sack_ok : 1, wscale_ok : 1, ecn_ok : 1, - acked : 1; - struct ip_options *opt; + acked : 1, + no_srccheck: 1; + kmemcheck_bitfield_end(flags); + struct ip_options_rcu *opt; + struct sk_buff *pktopts; + u32 ir_mark; }; static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) @@ -82,6 +98,33 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) return (struct inet_request_sock *)sk; } +static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb) +{ + if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) { + return skb->mark; + } else { + return sk->sk_mark; + } +} + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; /* Total length of all frames */ + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + struct ip_mc_socklist; struct ipv6_pinfo; struct rtable; @@ -90,17 +133,18 @@ struct rtable; * * @sk - ancestor class * @pinet6 - pointer to IPv6 control block - * @daddr - Foreign IPv4 addr - * @rcv_saddr - Bound local IPv4 addr - * @dport - Destination port - * @num - Local port - * @saddr - Sending source + * @inet_daddr - Foreign IPv4 addr + * @inet_rcv_saddr - Bound local IPv4 addr + * @inet_dport - Destination port + * @inet_num - Local port + * @inet_saddr - Sending source * @uc_ttl - Unicast TTL - * @sport - Source port - * @id - ID counter for DF pkts + * @inet_sport - Source port + * @inet_id - ID counter for DF pkts * @tos - TOS * @mc_ttl - Multicasting TTL * @is_icsk - is this an inet_connection_sock? + * @uc_index - Unicast outgoing device index * @mc_index - Multicast device index * @mc_list - Group array * @cork - info to build ip hdr on each ip frag while socket is corked @@ -108,40 +152,41 @@ struct rtable; struct inet_sock { /* sk and pinet6 has to be the first two members of inet_sock */ struct sock sk; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *pinet6; #endif /* Socket demultiplex comparisons on incoming packets. 
*/ - __be32 daddr; - __be32 rcv_saddr; - __be16 dport; - __u16 num; - __be32 saddr; +#define inet_daddr sk.__sk_common.skc_daddr +#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr +#define inet_dport sk.__sk_common.skc_dport +#define inet_num sk.__sk_common.skc_num + + __be32 inet_saddr; __s16 uc_ttl; __u16 cmsg_flags; - struct ip_options *opt; - __be16 sport; - __u16 id; + __be16 inet_sport; + __u16 inet_id; + + struct ip_options_rcu __rcu *inet_opt; + int rx_dst_ifindex; __u8 tos; + __u8 min_ttl; __u8 mc_ttl; __u8 pmtudisc; __u8 recverr:1, is_icsk:1, freebind:1, hdrincl:1, - mc_loop:1; + mc_loop:1, + transparent:1, + mc_all:1, + nodefrag:1; + __u8 rcv_tos; + int uc_index; int mc_index; __be32 mc_addr; - struct ip_mc_socklist *mc_list; - struct { - unsigned int flags; - unsigned int fragsize; - struct ip_options *opt; - struct rtable *rt; - int length; /* Total length of all frames */ - __be32 addr; - struct flowi fl; - } cork; + struct ip_mc_socklist __rcu *mc_list; + struct inet_cork_full cork; }; #define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ @@ -159,7 +204,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to, memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1, sk_from->sk_prot->obj_size - ancestor_size); } -#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) +#if !(IS_ENABLED(CONFIG_IPV6)) static inline void inet_sk_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { @@ -167,35 +212,40 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, } #endif -extern int inet_sk_rebuild_header(struct sock *sk); - -extern u32 inet_ehash_secret; -extern void build_ehash_secret(void); +int inet_sk_rebuild_header(struct sock *sk); -static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport, - const __be32 faddr, const __be16 fport) +static inline unsigned int __inet_ehashfn(const __be32 laddr, + const __u16 lport, + const __be32 faddr, + const __be16 fport, + u32 initval) { return jhash_3words((__force __u32) laddr, (__force __u32) faddr, ((__u32) lport) << 16 | (__force __u32)fport, - inet_ehash_secret); + initval); } -static inline int inet_sk_ehashfn(const struct sock *sk) +static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops) { - const struct inet_sock *inet = inet_sk(sk); - const __be32 laddr = inet->rcv_saddr; - const __u16 lport = inet->num; - const __be32 faddr = inet->daddr; - const __be16 fport = inet->dport; + struct request_sock *req = reqsk_alloc(ops); + struct inet_request_sock *ireq = inet_rsk(req); - return inet_ehashfn(laddr, lport, faddr, fport); -} + if (req != NULL) { + kmemcheck_annotate_bitfield(ireq, flags); + ireq->opt = NULL; + } + return req; +} -static inline int inet_iif(const struct sk_buff *skb) +static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { - return ((struct rtable *)skb->dst)->rt_iif; + __u8 flags = 0; + + if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) + flags |= FLOWI_FLAG_ANYSRC; + return flags; } #endif /* _INET_SOCK_H */ diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 296547bfb0b..61474ea0215 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -16,8 +16,8 @@ #define _INET_TIMEWAIT_SOCK_ +#include <linux/kmemcheck.h> #include <linux/list.h> -#include <linux/module.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> @@ -27,7 +27,7 @@ #include <net/tcp_states.h> #include <net/timewait_sock.h> -#include <asm/atomic.h> +#include 
<linux/atomic.h> struct inet_hashinfo; @@ -58,6 +58,11 @@ struct inet_hashinfo; # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) #endif +static inline u32 inet_tw_time_stamp(void) +{ + return jiffies; +} + /* TIME_WAIT reaping mechanism. */ #define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */ @@ -83,15 +88,9 @@ struct inet_timewait_death_row { int sysctl_max_tw_buckets; }; -extern void inet_twdr_hangman(unsigned long data); -extern void inet_twdr_twkill_work(struct work_struct *work); -extern void inet_twdr_twcal_tick(unsigned long data); - -#if (BITS_PER_LONG == 64) -#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8 -#else -#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4 -#endif +void inet_twdr_hangman(unsigned long data); +void inet_twdr_twkill_work(struct work_struct *work); +void inet_twdr_twcal_tick(unsigned long data); struct inet_bind_bucket; @@ -110,43 +109,39 @@ struct inet_timewait_sock { #define tw_state __tw_common.skc_state #define tw_reuse __tw_common.skc_reuse #define tw_bound_dev_if __tw_common.skc_bound_dev_if -#define tw_node __tw_common.skc_node +#define tw_node __tw_common.skc_nulls_node #define tw_bind_node __tw_common.skc_bind_node #define tw_refcnt __tw_common.skc_refcnt #define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot #define tw_net __tw_common.skc_net +#define tw_daddr __tw_common.skc_daddr +#define tw_v6_daddr __tw_common.skc_v6_daddr +#define tw_rcv_saddr __tw_common.skc_rcv_saddr +#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr +#define tw_dport __tw_common.skc_dport +#define tw_num __tw_common.skc_num + int tw_timeout; volatile unsigned char tw_substate; - /* 3 bits hole, try to pack */ unsigned char tw_rcv_wscale; + /* Socket demultiplex comparisons on incoming packets. */ - /* these five are in inet_sock */ + /* these three are in inet_sock */ __be16 tw_sport; - __be32 tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES))); - __be32 tw_rcv_saddr; - __be16 tw_dport; - __u16 tw_num; + kmemcheck_bitfield_begin(flags); /* And these are ours. 
*/ - __u8 tw_ipv6only:1; - /* 15 bits hole, try to pack */ - __u16 tw_ipv6_offset; - unsigned long tw_ttd; + unsigned int tw_ipv6only : 1, + tw_transparent : 1, + tw_flowlabel : 20, + tw_pad : 2, /* 2 bits hole */ + tw_tos : 8; + kmemcheck_bitfield_end(flags); + u32 tw_ttd; struct inet_bind_bucket *tw_tb; struct hlist_node tw_death_node; }; - -static inline void inet_twsk_add_node(struct inet_timewait_sock *tw, - struct hlist_head *list) -{ - hlist_add_head(&tw->tw_node, list); -} - -static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw, - struct hlist_head *list) -{ - hlist_add_head(&tw->tw_bind_node, list); -} +#define tw_tclass tw_tos static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw) { @@ -174,37 +169,51 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw) } #define inet_twsk_for_each(tw, node, head) \ - hlist_for_each_entry(tw, node, head, tw_node) + hlist_nulls_for_each_entry(tw, node, head, tw_node) -#define inet_twsk_for_each_inmate(tw, node, jail) \ - hlist_for_each_entry(tw, node, jail, tw_death_node) +#define inet_twsk_for_each_inmate(tw, jail) \ + hlist_for_each_entry(tw, jail, tw_death_node) -#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \ - hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node) +#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \ + hlist_for_each_entry_safe(tw, safe, jail, tw_death_node) static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk) { return (struct inet_timewait_sock *)sk; } -static inline __be32 inet_rcv_saddr(const struct sock *sk) -{ - return likely(sk->sk_state != TCP_TIME_WAIT) ? - inet_sk(sk)->rcv_saddr : inet_twsk(sk)->tw_rcv_saddr; -} +void inet_twsk_free(struct inet_timewait_sock *tw); +void inet_twsk_put(struct inet_timewait_sock *tw); + +int inet_twsk_unhash(struct inet_timewait_sock *tw); + +int inet_twsk_bind_unhash(struct inet_timewait_sock *tw, + struct inet_hashinfo *hashinfo); -extern void inet_twsk_put(struct inet_timewait_sock *tw); +struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, + const int state); -extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, - const int state); +void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, + struct inet_hashinfo *hashinfo); -extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw, - struct sock *sk, - struct inet_hashinfo *hashinfo); +void inet_twsk_schedule(struct inet_timewait_sock *tw, + struct inet_timewait_death_row *twdr, + const int timeo, const int timewait_len); +void inet_twsk_deschedule(struct inet_timewait_sock *tw, + struct inet_timewait_death_row *twdr); -extern void inet_twsk_schedule(struct inet_timewait_sock *tw, - struct inet_timewait_death_row *twdr, - const int timeo, const int timewait_len); -extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, - struct inet_timewait_death_row *twdr); +void inet_twsk_purge(struct inet_hashinfo *hashinfo, + struct inet_timewait_death_row *twdr, int family); + +static inline +struct net *twsk_net(const struct inet_timewait_sock *twsk) +{ + return read_pnet(&twsk->tw_net); +} + +static inline +void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net) +{ + write_pnet(&twsk->tw_net, net); +} #endif /* _INET_TIMEWAIT_SOCK_ */ diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index ad8404b5611..01d590ee5e7 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -1,8 +1,6 @@ /* * INETPEER - A storage for permanent information 
about peers * - * Version: $Id: inetpeer.h,v 1.2 2002/01/12 07:54:56 davem Exp $ - * * Authors: Andrey V. Savochkin <saw@msu.ru> */ @@ -13,43 +11,164 @@ #include <linux/init.h> #include <linux/jiffies.h> #include <linux/spinlock.h> -#include <asm/atomic.h> +#include <linux/rtnetlink.h> +#include <net/ipv6.h> +#include <linux/atomic.h> -struct inet_peer -{ +struct inetpeer_addr_base { + union { + __be32 a4; + __be32 a6[4]; + }; +}; + +struct inetpeer_addr { + struct inetpeer_addr_base addr; + __u16 family; +}; + +struct inet_peer { /* group together avl_left,avl_right,v4daddr to speedup lookups */ - struct inet_peer *avl_left, *avl_right; - __be32 v4daddr; /* peer's address */ - __u16 avl_height; - __u16 ip_id_count; /* IP ID for the next packet */ - struct list_head unused; - __u32 dtime; /* the time of last use of not - * referenced entries */ + struct inet_peer __rcu *avl_left, *avl_right; + struct inetpeer_addr daddr; + __u32 avl_height; + + u32 metrics[RTAX_MAX]; + u32 rate_tokens; /* rate limiting for ICMP */ + unsigned long rate_last; + union { + struct list_head gc_list; + struct rcu_head gc_rcu; + }; + /* + * Once inet_peer is queued for deletion (refcnt == -1), following field + * is not available: rid + * We can share memory with rcu_head to help keep inet_peer small. + */ + union { + struct { + atomic_t rid; /* Frag reception counter */ + }; + struct rcu_head rcu; + struct inet_peer *gc_next; + }; + + /* following fields might be frequently dirtied */ + __u32 dtime; /* the time of last use of not referenced entries */ atomic_t refcnt; - atomic_t rid; /* Frag reception counter */ - __u32 tcp_ts; - unsigned long tcp_ts_stamp; }; -void inet_initpeers(void) __init; +struct inet_peer_base { + struct inet_peer __rcu *root; + seqlock_t lock; + u32 flush_seq; + int total; +}; -/* can be called with or without local BH being disabled */ -struct inet_peer *inet_getpeer(__be32 daddr, int create); +#define INETPEER_BASE_BIT 0x1UL -/* can be called from BH context or outside */ -extern void inet_putpeer(struct inet_peer *p); +static inline struct inet_peer *inetpeer_ptr(unsigned long val) +{ + BUG_ON(val & INETPEER_BASE_BIT); + return (struct inet_peer *) val; +} + +static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val) +{ + if (!(val & INETPEER_BASE_BIT)) + return NULL; + val &= ~INETPEER_BASE_BIT; + return (struct inet_peer_base *) val; +} + +static inline bool inetpeer_ptr_is_peer(unsigned long val) +{ + return !(val & INETPEER_BASE_BIT); +} + +static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer) +{ + /* This implicitly clears INETPEER_BASE_BIT */ + *val = (unsigned long) peer; +} + +static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer) +{ + unsigned long val = (unsigned long) peer; + unsigned long orig = *ptr; + + if (!(orig & INETPEER_BASE_BIT) || + cmpxchg(ptr, orig, val) != orig) + return false; + return true; +} + +static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base) +{ + *ptr = (unsigned long) base | INETPEER_BASE_BIT; +} + +static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from) +{ + unsigned long val = *from; + + *to = val; + if (inetpeer_ptr_is_peer(val)) { + struct inet_peer *peer = inetpeer_ptr(val); + atomic_inc(&peer->refcnt); + } +} + +void inet_peer_base_init(struct inet_peer_base *); + +void inet_initpeers(void) __init; + +#define INETPEER_METRICS_NEW (~(u32) 0) + +static inline bool inet_metrics_new(const struct inet_peer *p) +{ + 
return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW; +} -extern spinlock_t inet_peer_idlock; /* can be called with or without local BH being disabled */ -static inline __u16 inet_getid(struct inet_peer *p, int more) +struct inet_peer *inet_getpeer(struct inet_peer_base *base, + const struct inetpeer_addr *daddr, + int create); + +static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base, + __be32 v4daddr, + int create) +{ + struct inetpeer_addr daddr; + + daddr.addr.a4 = v4daddr; + daddr.family = AF_INET; + return inet_getpeer(base, &daddr, create); +} + +static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base, + const struct in6_addr *v6daddr, + int create) { - __u16 id; + struct inetpeer_addr daddr; - spin_lock_bh(&inet_peer_idlock); - id = p->ip_id_count; - p->ip_id_count += 1 + more; - spin_unlock_bh(&inet_peer_idlock); - return id; + *(struct in6_addr *)daddr.addr.a6 = *v6daddr; + daddr.family = AF_INET6; + return inet_getpeer(base, &daddr, create); } +/* can be called from BH context or outside */ +void inet_putpeer(struct inet_peer *p); +bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); + +void inetpeer_invalidate_tree(struct inet_peer_base *); + +/* + * temporary check to make sure we dont access rid, tcp_ts, + * tcp_ts_stamp if no refcount is taken on inet_peer + */ +static inline void inet_peer_refcheck(const struct inet_peer *p) +{ + WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0); +} #endif /* _NET_INETPEER_H */ diff --git a/include/net/ip.h b/include/net/ip.h index 9f50d4f1f15..7596eb22e1c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -28,12 +28,13 @@ #include <linux/skbuff.h> #include <net/inet_sock.h> +#include <net/route.h> #include <net/snmp.h> +#include <net/flow.h> struct sock; -struct inet_skb_parm -{ +struct inet_skb_parm { struct ip_options opt; /* Compiled IP options */ unsigned char flags; @@ -42,6 +43,8 @@ struct inet_skb_parm #define IPSKB_XFRM_TRANSFORMED 4 #define IPSKB_FRAG_COMPLETE 8 #define IPSKB_REROUTED 16 + + u16 frag_max_size; }; static inline unsigned int ip_hdrlen(const struct sk_buff *skb) @@ -49,24 +52,30 @@ static inline unsigned int ip_hdrlen(const struct sk_buff *skb) return ip_hdr(skb)->ihl * 4; } -struct ipcm_cookie -{ +struct ipcm_cookie { __be32 addr; int oif; - struct ip_options *opt; + struct ip_options_rcu *opt; + __u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; }; #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) +#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb)) -struct ip_ra_chain -{ - struct ip_ra_chain *next; +struct ip_ra_chain { + struct ip_ra_chain __rcu *next; struct sock *sk; - void (*destructor)(struct sock *); + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct rcu_head rcu; }; -extern struct ip_ra_chain *ip_ra_chain; -extern rwlock_t ip_ra_lock; +extern struct ip_ra_chain __rcu *ip_ra_chain; /* IP flags. 
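For context on the ipcm_cookie above: it is what a sendmsg() implementation fills in before handing data to ip_append_data(). A condensed sketch (my_fill_ipcm is hypothetical caller code; tos = -1 means "not set", matching the get_rttos() helper later in this header):

static int my_fill_ipcm(struct sock *sk, struct msghdr *msg,
                        struct ipcm_cookie *ipc)
{
        struct inet_sock *inet = inet_sk(sk);

        ipc->addr = inet->inet_saddr;
        ipc->opt = NULL;                /* ip_cmsg_send() may install options */
        ipc->oif = sk->sk_bound_dev_if;
        ipc->tx_flags = 0;
        ipc->ttl = 0;
        ipc->tos = -1;                  /* fall back to inet->tos */

        /* Parse IP_PKTINFO/IP_TOS/... control messages from userspace. */
        return ip_cmsg_send(sock_net(sk), msg, ipc, false);
}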
*/ #define IP_CE 0x8000 /* Flag: "Congestion" */ @@ -82,114 +91,161 @@ struct packet_type; struct rtable; struct sockaddr; -extern int igmp_mc_proc_init(void); +int igmp_mc_init(void); /* * Functions provided by ip.c */ -extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, - __be32 saddr, __be32 daddr, - struct ip_options *opt); -extern int ip_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev); -extern int ip_local_deliver(struct sk_buff *skb); -extern int ip_mr_input(struct sk_buff *skb); -extern int ip_output(struct sk_buff *skb); -extern int ip_mc_output(struct sk_buff *skb); -extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); -extern int ip_do_nat(struct sk_buff *skb); -extern void ip_send_check(struct iphdr *ip); -extern int __ip_local_out(struct sk_buff *skb); -extern int ip_local_out(struct sk_buff *skb); -extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok); -extern void ip_init(void); -extern int ip_append_data(struct sock *sk, - int getfrag(void *from, char *to, int offset, int len, - int odd, struct sk_buff *skb), - void *from, int len, int protolen, - struct ipcm_cookie *ipc, - struct rtable *rt, - unsigned int flags); -extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb); -extern ssize_t ip_append_page(struct sock *sk, struct page *page, - int offset, size_t size, int flags); -extern int ip_push_pending_frames(struct sock *sk); -extern void ip_flush_pending_frames(struct sock *sk); +int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, + __be32 saddr, __be32 daddr, + struct ip_options_rcu *opt); +int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, + struct net_device *orig_dev); +int ip_local_deliver(struct sk_buff *skb); +int ip_mr_input(struct sk_buff *skb); +int ip_output(struct sock *sk, struct sk_buff *skb); +int ip_mc_output(struct sock *sk, struct sk_buff *skb); +int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); +int ip_do_nat(struct sk_buff *skb); +void ip_send_check(struct iphdr *ip); +int __ip_local_out(struct sk_buff *skb); +int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); +static inline int ip_local_out(struct sk_buff *skb) +{ + return ip_local_out_sk(skb->sk, skb); +} -/* datagram.c */ -extern int ip4_datagram_connect(struct sock *sk, - struct sockaddr *uaddr, int addr_len); +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); +void ip_init(void); +int ip_append_data(struct sock *sk, struct flowi4 *fl4, + int getfrag(void *from, char *to, int offset, int len, + int odd, struct sk_buff *skb), + void *from, int len, int protolen, + struct ipcm_cookie *ipc, + struct rtable **rt, + unsigned int flags); +int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, + struct sk_buff *skb); +ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, + int offset, size_t size, int flags); +struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, + struct sk_buff_head *queue, + struct inet_cork *cork); +int ip_send_skb(struct net *net, struct sk_buff *skb); +int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4); +void ip_flush_pending_frames(struct sock *sk); +struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, + int getfrag(void *from, char *to, int offset, + int len, int odd, struct sk_buff *skb), + void *from, int length, int transhdrlen, + struct ipcm_cookie 
*ipc, struct rtable **rtp, + unsigned int flags); + +static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) +{ + return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); +} -/* - * Map a multicast IP onto multicast MAC for type Token Ring. - * This conforms to RFC1469 Option 2 Multicasting i.e. - * using a functional address to transmit / receive - * multicast packets. - */ +static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) +{ + return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos); +} -static inline void ip_tr_mc_map(__be32 addr, char *buf) +static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x00; - buf[3]=0x04; - buf[4]=0x00; - buf[5]=0x00; + return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk); } +/* datagram.c */ +int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); + +void ip4_datagram_release_cb(struct sock *sk); + struct ip_reply_arg { struct kvec iov[1]; + int flags; __wsum csum; int csumoffset; /* u16 offset of csum in iov[0].iov_base */ /* -1 if not needed */ int bound_dev_if; + u8 tos; }; -void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg, - unsigned int len); +#define IP_REPLY_ARG_NOSRCCHECK 1 -struct ipv4_config +static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) { - int log_martians; - int no_pmtu_disc; -}; + return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; +} -extern struct ipv4_config ipv4_config; -DECLARE_SNMP_STAT(struct ipstats_mib, ip_statistics); -#define IP_INC_STATS(field) SNMP_INC_STATS(ip_statistics, field) -#define IP_INC_STATS_BH(field) SNMP_INC_STATS_BH(ip_statistics, field) -#define IP_INC_STATS_USER(field) SNMP_INC_STATS_USER(ip_statistics, field) -#define IP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(ip_statistics, field, val) -DECLARE_SNMP_STAT(struct linux_mib, net_statistics); -#define NET_INC_STATS(field) SNMP_INC_STATS(net_statistics, field) -#define NET_INC_STATS_BH(field) SNMP_INC_STATS_BH(net_statistics, field) -#define NET_INC_STATS_USER(field) SNMP_INC_STATS_USER(net_statistics, field) -#define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd) -#define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd) - -extern unsigned long snmp_fold_field(void *mib[], int offt); -extern int snmp_mib_init(void *ptr[2], size_t mibsize); -extern void snmp_mib_free(void *ptr[2]); - -extern void inet_get_local_port_range(int *low, int *high); - -extern int sysctl_ip_default_ttl; -extern int sysctl_ip_nonlocal_bind; +void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + __be32 saddr, const struct ip_reply_arg *arg, + unsigned int len); + +#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) +#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) +#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) +#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) +#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) +#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) +#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) +#define 
NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) +#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field) +#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) +#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) +#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) + +unsigned long snmp_fold_field(void __percpu *mib, int offt); +#if BITS_PER_LONG==32 +u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off); +#else +static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off) +{ + return snmp_fold_field(mib, offt); +} +#endif -extern struct ctl_path net_ipv4_ctl_path[]; +void inet_get_local_port_range(struct net *net, int *low, int *high); + +#ifdef CONFIG_SYSCTL +static inline int inet_is_local_reserved_port(struct net *net, int port) +{ + if (!net->ipv4.sysctl_local_reserved_ports) + return 0; + return test_bit(port, net->ipv4.sysctl_local_reserved_ports); +} +#else +static inline int inet_is_local_reserved_port(struct net *net, int port) +{ + return 0; +} +#endif + +extern int sysctl_ip_nonlocal_bind; /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; extern int inet_peer_maxttl; -extern int inet_peer_gc_mintime; -extern int inet_peer_gc_maxtime; + +/* From ip_input.c */ +extern int sysctl_ip_early_demux; /* From ip_output.c */ extern int sysctl_ip_dynaddr; -extern void ipfrag_init(void); +void ipfrag_init(void); + +void ip_static_sysctl_init(void); + +#define IP4_REPLY_MARK(net, mark) \ + ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) + +static inline bool ip_is_fragment(const struct iphdr *iph) +{ + return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0; +} #ifdef CONFIG_INET #include <net/dst.h> @@ -208,37 +264,84 @@ int ip_decrease_ttl(struct iphdr *iph) static inline int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) { - return (inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || + return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && - !(dst_metric(dst, RTAX_LOCK)&(1<<RTAX_MTU)))); + !(dst_metric_locked(dst, RTAX_MTU))); } -extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); +static inline bool ip_sk_accept_pmtu(const struct sock *sk) +{ + return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE && + inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT; +} -static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk) +static inline bool ip_sk_use_pmtu(const struct sock *sk) { - if (iph->frag_off & htons(IP_DF)) { + return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; +} + +static inline bool ip_sk_ignore_df(const struct sock *sk) +{ + return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || + inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; +} + +static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + bool forwarding) +{ + struct net *net = dev_net(dst->dev); + + if (net->ipv4.sysctl_ip_fwd_use_pmtu || + dst_metric_locked(dst, RTAX_MTU) || + !forwarding) + return dst_mtu(dst); + + return min(dst->dev->mtu, IP_MAX_MTU); +} + +static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) +{ + if (!skb->sk || ip_sk_use_pmtu(skb->sk)) { + bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); + } else { + return min(skb_dst(skb)->dev->mtu, 
IP_MAX_MTU); + } +} + +u32 ip_idents_reserve(u32 hash, int segs); +void __ip_select_ident(struct iphdr *iph, int segs); + +static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) +{ + struct iphdr *iph = ip_hdr(skb); + + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { /* This is only to work around buggy Windows95/2000 * VJ compression implementations. If the ID field * does not change, they drop every other packet in * a TCP stream using header compression. */ - iph->id = (sk && inet_sk(sk)->daddr) ? - htons(inet_sk(sk)->id++) : 0; - } else - __ip_select_ident(iph, dst, 0); + if (sk && inet_sk(sk)->inet_daddr) { + iph->id = htons(inet_sk(sk)->inet_id); + inet_sk(sk)->inet_id += segs; + } else { + iph->id = 0; + } + } else { + __ip_select_ident(iph, segs); + } } -static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more) +static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk) { - if (iph->frag_off & htons(IP_DF)) { - if (sk && inet_sk(sk)->daddr) { - iph->id = htons(inet_sk(sk)->id); - inet_sk(sk)->id += 1 + more; - } else - iph->id = 0; - } else - __ip_select_ident(iph, dst, more); + ip_select_ident_segs(skb, sk, 1); +} + +static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) +{ + return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, + skb->len, proto, 0); } /* @@ -294,43 +397,80 @@ static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, ch buf[16] = addr & 0x0f; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf) +{ + if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) + memcpy(buf, broadcast, 4); + else + memcpy(buf, &naddr, sizeof(naddr)); +} + +#if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif static __inline__ void inet_reset_saddr(struct sock *sk) { - inet_sk(sk)->rcv_saddr = inet_sk(sk)->saddr = 0; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; +#if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); memset(&np->saddr, 0, sizeof(np->saddr)); - memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr)); + memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); } #endif } #endif -extern int ip_call_ra_chain(struct sk_buff *skb); +static inline int sk_mc_loop(struct sock *sk) +{ + if (!sk) + return 1; + switch (sk->sk_family) { + case AF_INET: + return inet_sk(sk)->mc_loop; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + return inet6_sk(sk)->mc_loop; +#endif + } + WARN_ON(1); + return 1; +} + +bool ip_call_ra_chain(struct sk_buff *skb); /* * Functions provided by ip_fragment.c */ -enum ip_defrag_users -{ +enum ip_defrag_users { IP_DEFRAG_LOCAL_DELIVER, IP_DEFRAG_CALL_RA_CHAIN, IP_DEFRAG_CONNTRACK_IN, + __IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX, IP_DEFRAG_CONNTRACK_OUT, + __IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX, + IP_DEFRAG_CONNTRACK_BRIDGE_IN, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, IP_DEFRAG_VS_IN, IP_DEFRAG_VS_OUT, - IP_DEFRAG_VS_FWD + IP_DEFRAG_VS_FWD, + IP_DEFRAG_AF_PACKET, + IP_DEFRAG_MACVLAN, }; int ip_defrag(struct sk_buff *skb, u32 user); +#ifdef CONFIG_INET +struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); +#else +static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, 
u32 user) +{ + return skb; +} +#endif int ip_frag_mem(struct net *net); int ip_frag_nqueues(struct net *net); @@ -338,55 +478,53 @@ int ip_frag_nqueues(struct net *net); * Functions provided by ip_forward.c */ -extern int ip_forward(struct sk_buff *skb); +int ip_forward(struct sk_buff *skb); /* * Functions provided by ip_options.c */ -extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); -extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); -extern void ip_options_fragment(struct sk_buff *skb); -extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb); -extern int ip_options_get(struct ip_options **optp, - unsigned char *data, int optlen); -extern int ip_options_get_from_user(struct ip_options **optp, - unsigned char __user *data, int optlen); -extern void ip_options_undo(struct ip_options * opt); -extern void ip_forward_options(struct sk_buff *skb); -extern int ip_options_rcv_srr(struct sk_buff *skb); +void ip_options_build(struct sk_buff *skb, struct ip_options *opt, + __be32 daddr, struct rtable *rt, int is_frag); +int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); +void ip_options_fragment(struct sk_buff *skb); +int ip_options_compile(struct net *net, struct ip_options *opt, + struct sk_buff *skb); +int ip_options_get(struct net *net, struct ip_options_rcu **optp, + unsigned char *data, int optlen); +int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, + unsigned char __user *data, int optlen); +void ip_options_undo(struct ip_options *opt); +void ip_forward_options(struct sk_buff *skb); +int ip_options_rcv_srr(struct sk_buff *skb); /* * Functions provided by ip_sockglue.c */ -extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); -extern int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc); -extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); -extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -extern int compat_ip_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, int optlen); -extern int compat_ip_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, int __user *optlen); -extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); - -extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); -extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, - __be16 port, u32 info, u8 *payload); -extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, - u32 info); - -/* sysctl helpers - any sysctl which holds a value that ends up being - * fed into the routing cache should use these handlers. 
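
The inet_compute_pseudo() helper added in the ip.h hunk above (and ip6_compute_pseudo() a little further down in ip6_checksum.h) wraps the TCP/UDP pseudo-header sum that csum_tcpudp_nofold()/csum_ipv6_magic() compute over source address, destination address, L4 length and protocol. The following is a minimal userspace sketch of that arithmetic only, not kernel code: it assumes host byte order for simplicity (the kernel works on __wsum values in network byte order and folds with csum_fold()), and all names in it are illustrative.

#include <stdint.h>
#include <stdio.h>

/* One's-complement 16-bit accumulate (RFC 1071). */
static uint32_t csum_add(uint32_t sum, uint16_t word)
{
	sum += word;
	return (sum & 0xffff) + (sum >> 16);	/* fold the carry back in */
}

/* Pseudo-header sum for IPv4 TCP/UDP: src, dst, zero byte, protocol, L4 length.
 * Addresses and length are taken in host byte order here for simplicity.
 */
static uint16_t pseudo_header_sum(uint32_t saddr, uint32_t daddr,
				  uint16_t l4len, uint8_t proto)
{
	uint32_t sum = 0;

	sum = csum_add(sum, saddr >> 16);
	sum = csum_add(sum, saddr & 0xffff);
	sum = csum_add(sum, daddr >> 16);
	sum = csum_add(sum, daddr & 0xffff);
	sum = csum_add(sum, proto);		/* zero byte + protocol number */
	sum = csum_add(sum, l4len);
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, UDP (17), 20-byte UDP length */
	uint16_t s = pseudo_header_sum(0xc0000201u, 0xc0000202u, 20, 17);

	printf("pseudo-header sum: 0x%04x\n", s);
	return 0;
}

The L4 checksum proper is then this partial sum continued over the transport header and payload and finally complemented, which is why the kernel helpers above return an unfolded ~csum suitable for feeding into csum_partial().
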
- */ -int ipv4_doint_and_flush(ctl_table *ctl, int write, - struct file* filp, void __user *buffer, - size_t *lenp, loff_t *ppos); -int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, - void __user *oldval, size_t __user *oldlenp, - void __user *newval, size_t newlen); +void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); +void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); +int ip_cmsg_send(struct net *net, struct msghdr *msg, + struct ipcm_cookie *ipc, bool allow_ipv6); +int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, + unsigned int optlen); +int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, + int __user *optlen); +int compat_ip_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +int compat_ip_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +int ip_ra_control(struct sock *sk, unsigned char on, + void (*destructor)(struct sock *)); + +int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); +void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, + u32 info, u8 *payload); +void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, + u32 info); + #ifdef CONFIG_PROC_FS -extern int ip_misc_proc_init(void); +int ip_misc_proc_init(void); #endif #endif /* _IP_H */ diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index bc1b0fda2b0..55236cb7117 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -31,64 +31,68 @@ #include <net/ip.h> #include <asm/checksum.h> #include <linux/in6.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> #ifndef _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum csum); +#endif -static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum csum) +static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) { + return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len, proto, 0)); +} - int carry; - __u32 ulen; - __u32 uproto; - __u32 sum = (__force u32)csum; - - sum += (__force u32)saddr->s6_addr32[0]; - carry = (sum < (__force u32)saddr->s6_addr32[0]); - sum += carry; - - sum += (__force u32)saddr->s6_addr32[1]; - carry = (sum < (__force u32)saddr->s6_addr32[1]); - sum += carry; - - sum += (__force u32)saddr->s6_addr32[2]; - carry = (sum < (__force u32)saddr->s6_addr32[2]); - sum += carry; - - sum += (__force u32)saddr->s6_addr32[3]; - carry = (sum < (__force u32)saddr->s6_addr32[3]); - sum += carry; - - sum += (__force u32)daddr->s6_addr32[0]; - carry = (sum < (__force u32)daddr->s6_addr32[0]); - sum += carry; - - sum += (__force u32)daddr->s6_addr32[1]; - carry = (sum < (__force u32)daddr->s6_addr32[1]); - sum += carry; - - sum += (__force u32)daddr->s6_addr32[2]; - carry = (sum < (__force u32)daddr->s6_addr32[2]); - sum += carry; +static __inline__ __sum16 tcp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) +{ + return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); +} - sum += (__force u32)daddr->s6_addr32[3]; - carry = (sum < (__force u32)daddr->s6_addr32[3]); - sum += carry; +static inline void __tcp_v6_send_check(struct sk_buff *skb, + const struct in6_addr *saddr, + const 
struct in6_addr *daddr) +{ + struct tcphdr *th = tcp_hdr(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + } else { + th->check = tcp_v6_check(skb->len, saddr, daddr, + csum_partial(th, th->doff << 2, + skb->csum)); + } +} - ulen = (__force u32)htonl((__u32) len); - sum += ulen; - carry = (sum < ulen); - sum += carry; +#if IS_ENABLED(CONFIG_IPV6) +static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) +{ + struct ipv6_pinfo *np = inet6_sk(sk); - uproto = (__force u32)htonl(proto); - sum += uproto; - carry = (sum < uproto); - sum += carry; + __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); +} +#endif - return csum_fold((__force __wsum)sum); +static inline __sum16 udp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) +{ + return csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, base); } -#endif +void udp6_set_csum(bool nocheck, struct sk_buff *skb, + const struct in6_addr *saddr, + const struct in6_addr *daddr, int len); + +int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto); #endif diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 953d6040ff5..9bcb220bd4a 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -13,19 +13,23 @@ #ifndef _IP6_FIB_H #define _IP6_FIB_H -#ifdef __KERNEL__ - #include <linux/ipv6_route.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <net/dst.h> #include <net/flow.h> #include <net/netlink.h> +#include <net/inetpeer.h> + +#ifdef CONFIG_IPV6_MULTIPLE_TABLES +#define FIB6_TABLE_HASHSZ 256 +#else +#define FIB6_TABLE_HASHSZ 1 +#endif struct rt6_info; -struct fib6_config -{ +struct fib6_config { u32 fc_table; u32 fc_metric; int fc_dst_len; @@ -33,20 +37,23 @@ struct fib6_config int fc_ifindex; u32 fc_flags; u32 fc_protocol; + u32 fc_type; /* only 8 bits are used */ struct in6_addr fc_dst; struct in6_addr fc_src; + struct in6_addr fc_prefsrc; struct in6_addr fc_gateway; unsigned long fc_expires; struct nlattr *fc_mx; int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; struct nl_info fc_nlinfo; }; -struct fib6_node -{ +struct fib6_node { struct fib6_node *parent; struct fib6_node *left; struct fib6_node *right; @@ -72,62 +79,140 @@ struct fib6_node * */ -struct rt6key -{ +struct rt6key { struct in6_addr addr; int plen; }; struct fib6_table; -struct rt6_info -{ - union { - struct dst_entry dst; - } u; - - struct inet6_dev *rt6i_idev; - -#define rt6i_dev u.dst.dev -#define rt6i_nexthop u.dst.neighbour -#define rt6i_expires u.dst.expires +struct rt6_info { + struct dst_entry dst; + /* + * Tail elements of dst_entry (__refcnt etc.) + * and these elements (rarely used in hot path) are in + * the same cache line. + */ + struct fib6_table *rt6i_table; struct fib6_node *rt6i_node; struct in6_addr rt6i_gateway; - + + /* Multipath routes: + * siblings is a list of rt6_info that have the the same metric/weight, + * destination, but not the same gateway. nsiblings is just a cache + * to speed up lookup. + */ + struct list_head rt6i_siblings; + unsigned int rt6i_nsiblings; + + atomic_t rt6i_ref; + + /* These are in a separate cache line. 
*/ + struct rt6key rt6i_dst ____cacheline_aligned_in_smp; u32 rt6i_flags; + struct rt6key rt6i_src; + struct rt6key rt6i_prefsrc; u32 rt6i_metric; - atomic_t rt6i_ref; + + struct inet6_dev *rt6i_idev; + unsigned long _rt6i_peer; + + u32 rt6i_genid; /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; u8 rt6i_protocol; +}; - struct fib6_table *rt6i_table; +static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt) +{ + return inetpeer_ptr(rt->_rt6i_peer); +} - struct rt6key rt6i_dst; +static inline bool rt6_has_peer(struct rt6_info *rt) +{ + return inetpeer_ptr_is_peer(rt->_rt6i_peer); +} -#ifdef CONFIG_XFRM - u32 rt6i_flow_cache_genid; -#endif +static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer) +{ + __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer); +} - struct rt6key rt6i_src; -}; +static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer) +{ + return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer); +} + +static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base) +{ + inetpeer_init_ptr(&rt->_rt6i_peer, base); +} + +static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort) +{ + inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer); +} static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) { return ((struct rt6_info *)dst)->rt6i_idev; } -struct fib6_walker_t +static inline void rt6_clean_expires(struct rt6_info *rt) +{ + rt->rt6i_flags &= ~RTF_EXPIRES; + rt->dst.expires = 0; +} + +static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) +{ + rt->dst.expires = expires; + rt->rt6i_flags |= RTF_EXPIRES; +} + +static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) +{ + struct rt6_info *rt; + + for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); + rt = (struct rt6_info *)rt->dst.from); + if (rt && rt != rt0) + rt0->dst.expires = rt->dst.expires; + + dst_set_expires(&rt0->dst, timeout); + rt0->rt6i_flags |= RTF_EXPIRES; +} + +static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) { - struct fib6_walker_t *prev, *next; + struct dst_entry *new = (struct dst_entry *) from; + + rt->rt6i_flags &= ~RTF_EXPIRES; + dst_hold(new); + rt->dst.from = new; +} + +static inline void ip6_rt_put(struct rt6_info *rt) +{ + /* dst_release() accepts a NULL parameter. 
+ * We rely on dst being first structure in struct rt6_info + */ + BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0); + dst_release(&rt->dst); +} + +struct fib6_walker_t { + struct list_head lh; struct fib6_node *root, *node; struct rt6_info *leaf; unsigned char state; unsigned char prune; + unsigned int skip; + unsigned int count; int (*func)(struct fib6_walker_t *); void *args; }; @@ -156,6 +241,7 @@ struct fib6_table { u32 tb6_id; rwlock_t tb6_lock; struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; }; #define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC @@ -174,48 +260,48 @@ struct fib6_table { #define RT6_TABLE_LOCAL RT6_TABLE_MAIN #endif -typedef struct rt6_info *(*pol_lookup_t)(struct fib6_table *, - struct flowi *, int); +typedef struct rt6_info *(*pol_lookup_t)(struct net *, + struct fib6_table *, + struct flowi6 *, int); /* * exported functions */ -extern struct fib6_table * fib6_get_table(u32 id); -extern struct fib6_table * fib6_new_table(u32 id); -extern struct dst_entry * fib6_rule_lookup(struct flowi *fl, int flags, - pol_lookup_t lookup); +struct fib6_table *fib6_get_table(struct net *net, u32 id); +struct fib6_table *fib6_new_table(struct net *net, u32 id); +struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, + int flags, pol_lookup_t lookup); + +struct fib6_node *fib6_lookup(struct fib6_node *root, + const struct in6_addr *daddr, + const struct in6_addr *saddr); -extern struct fib6_node *fib6_lookup(struct fib6_node *root, - struct in6_addr *daddr, - struct in6_addr *saddr); +struct fib6_node *fib6_locate(struct fib6_node *root, + const struct in6_addr *daddr, int dst_len, + const struct in6_addr *saddr, int src_len); -struct fib6_node *fib6_locate(struct fib6_node *root, - struct in6_addr *daddr, int dst_len, - struct in6_addr *saddr, int src_len); +void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), + void *arg); -extern void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), - int prune, void *arg); +int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, + struct nlattr *mx, int mx_len); -extern int fib6_add(struct fib6_node *root, - struct rt6_info *rt, - struct nl_info *info); +int fib6_del(struct rt6_info *rt, struct nl_info *info); -extern int fib6_del(struct rt6_info *rt, - struct nl_info *info); +void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); -extern void inet6_rt_notify(int event, struct rt6_info *rt, - struct nl_info *info); +void fib6_run_gc(unsigned long expires, struct net *net, bool force); -extern void fib6_run_gc(unsigned long dummy); +void fib6_gc_cleanup(void); -extern void fib6_gc_cleanup(void); +int fib6_init(void); -extern int fib6_init(void); +int ipv6_route_open(struct inode *inode, struct file *file); #ifdef CONFIG_IPV6_MULTIPLE_TABLES -extern int fib6_rules_init(void); -extern void fib6_rules_cleanup(void); +int fib6_rules_init(void); +void fib6_rules_cleanup(void); #else static inline int fib6_rules_init(void) { @@ -227,4 +313,3 @@ static inline void fib6_rules_cleanup(void) } #endif #endif -#endif diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f99e4f0f568..1d09b46c1e4 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -1,10 +1,6 @@ #ifndef _NET_IP6_ROUTE_H #define _NET_IP6_ROUTE_H -#define IP6_RT_PRIO_USER 1024 -#define IP6_RT_PRIO_ADDRCONF 256 -#define IP6_RT_PRIO_KERN 512 - struct route_info { __u8 type; __u8 length; @@ -22,115 +18,124 @@ struct route_info { __u8 prefix[0]; /* 0,8 or 16 */ }; 
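
The ip6_rt_put() and ip6_dst_idev() helpers in the ip6_fib.h hunk above both rely on struct dst_entry being the first member of struct rt6_info, so a dst_entry pointer and an rt6_info pointer refer to the same address and can be cast back and forth; the BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0) line turns that layout assumption into a compile-time check. Below is a minimal, self-contained userspace sketch of the same embed-as-first-member pattern; the struct and function names are stand-ins for illustration only, not the kernel's.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for dst_entry / rt6_info. */
struct dst_entry {
	int refcnt;
};

struct rt6_info {
	struct dst_entry dst;	/* must stay first for the cast below to be valid */
	unsigned int rt6i_flags;
};

/* Compile-time guard, analogous to BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0). */
_Static_assert(offsetof(struct rt6_info, dst) == 0,
	       "dst must be the first member of struct rt6_info");

/* A caller holding only the embedded dst_entry can recover the outer struct. */
static struct rt6_info *rt6_from_dst(struct dst_entry *dst)
{
	return (struct rt6_info *)dst;
}

int main(void)
{
	struct rt6_info rt = { .dst = { .refcnt = 1 }, .rt6i_flags = 0x4 };
	struct dst_entry *dst = &rt.dst;

	printf("flags via dst pointer: 0x%x\n", rt6_from_dst(dst)->rt6i_flags);
	return 0;
}

The same layout assumption is what lets ip6_dst_idev() cast a generic struct dst_entry * straight to struct rt6_info * without any container_of() bookkeeping.
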
-#ifdef __KERNEL__ - #include <net/flow.h> #include <net/ip6_fib.h> #include <net/sock.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/route.h> -#define RT6_LOOKUP_F_IFACE 0x1 -#define RT6_LOOKUP_F_REACHABLE 0x2 -#define RT6_LOOKUP_F_HAS_SADDR 0x4 +#define RT6_LOOKUP_F_IFACE 0x00000001 +#define RT6_LOOKUP_F_REACHABLE 0x00000002 +#define RT6_LOOKUP_F_HAS_SADDR 0x00000004 +#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 +#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 +#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 -extern struct rt6_info ip6_null_entry; +/* We do not (yet ?) support IPv6 jumbograms (RFC 2675) + * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header + */ +#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr)) -#ifdef CONFIG_IPV6_MULTIPLE_TABLES -extern struct rt6_info ip6_prohibit_entry; -extern struct rt6_info ip6_blk_hole_entry; -#endif +/* + * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate + * between IPV6_ADDR_PREFERENCES socket option values + * IPV6_PREFER_SRC_TMP = 0x1 + * IPV6_PREFER_SRC_PUBLIC = 0x2 + * IPV6_PREFER_SRC_COA = 0x4 + * and above RT6_LOOKUP_F_SRCPREF_xxx flags. + */ +static inline int rt6_srcprefs2flags(unsigned int srcprefs) +{ + /* No need to bitmask because srcprefs have only 3 bits. */ + return srcprefs << 3; +} + +static inline unsigned int rt6_flags2srcprefs(int flags) +{ + return (flags >> 3) & 7; +} -extern void ip6_route_input(struct sk_buff *skb); +static inline bool rt6_need_strict(const struct in6_addr *daddr) +{ + return ipv6_addr_type(daddr) & + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); +} -extern struct dst_entry * ip6_route_output(struct sock *sk, - struct flowi *fl); +void ip6_route_input(struct sk_buff *skb); -extern int ip6_route_init(void); -extern void ip6_route_cleanup(void); +struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, + struct flowi6 *fl6); +struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, + int flags); -extern int ipv6_route_ioctl(unsigned int cmd, void __user *arg); +int ip6_route_init(void); +void ip6_route_cleanup(void); -extern int ip6_route_add(struct fib6_config *cfg); -extern int ip6_ins_rt(struct rt6_info *); -extern int ip6_del_rt(struct rt6_info *); +int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg); -extern int ip6_rt_addr_add(struct in6_addr *addr, - struct net_device *dev, - int anycast); +int ip6_route_add(struct fib6_config *cfg); +int ip6_ins_rt(struct rt6_info *); +int ip6_del_rt(struct rt6_info *); -extern int ip6_rt_addr_del(struct in6_addr *addr, - struct net_device *dev); +int ip6_route_get_saddr(struct net *net, struct rt6_info *rt, + const struct in6_addr *daddr, unsigned int prefs, + struct in6_addr *saddr); -extern void rt6_sndmsg(int type, struct in6_addr *dst, - struct in6_addr *src, - struct in6_addr *gw, - struct net_device *dev, - int dstlen, int srclen, - int metric, __u32 flags); +struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, + const struct in6_addr *saddr, int oif, int flags); -extern struct rt6_info *rt6_lookup(struct in6_addr *daddr, - struct in6_addr *saddr, - int oif, int flags); +struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6); +int icmp6_dst_gc(void); -extern struct dst_entry *ndisc_dst_alloc(struct net_device *dev, - struct neighbour *neigh, - struct in6_addr *addr, - int (*output)(struct sk_buff *)); -extern int ndisc_dst_gc(int *more); -extern void fib6_force_start_gc(void); +void fib6_force_start_gc(struct 
net *net); -extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, - const struct in6_addr *addr, - int anycast); +struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, + const struct in6_addr *addr, bool anycast); /* * support functions for ND * */ -extern struct rt6_info * rt6_get_dflt_router(struct in6_addr *addr, - struct net_device *dev); -extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr, - struct net_device *dev, - unsigned int pref); - -extern void rt6_purge_dflt_routers(void); - -extern int rt6_route_rcv(struct net_device *dev, - u8 *opt, int len, - struct in6_addr *gwaddr); - -extern void rt6_redirect(struct in6_addr *dest, - struct in6_addr *src, - struct in6_addr *saddr, - struct neighbour *neigh, - u8 *lladdr, - int on_link); - -extern void rt6_pmtu_discovery(struct in6_addr *daddr, - struct in6_addr *saddr, - struct net_device *dev, - u32 pmtu); +struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, + struct net_device *dev); +struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, + struct net_device *dev, unsigned int pref); + +void rt6_purge_dflt_routers(struct net *net); + +int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, + const struct in6_addr *gwaddr); + +void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif, + u32 mark); +void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu); +void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); +void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, + u32 mark); +void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); struct netlink_callback; -struct rt6_rtnl_dump_arg -{ +struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; + struct net *net; }; -extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); -extern void rt6_ifdown(struct net_device *dev); -extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); +int rt6_dump_route(struct rt6_info *rt, void *p_arg); +void rt6_ifdown(struct net *net, struct net_device *dev); +void rt6_mtu_change(struct net_device *dev, unsigned int mtu); +void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); -extern rwlock_t rt6_lock; /* * Store a destination cache entry in a socket */ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, - struct in6_addr *daddr, struct in6_addr *saddr) + const struct in6_addr *daddr, + const struct in6_addr *saddr) { struct ipv6_pinfo *np = inet6_sk(sk); struct rt6_info *rt = (struct rt6_info *) dst; @@ -146,17 +151,50 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, struct in6_addr *daddr, struct in6_addr *saddr) { - write_lock(&sk->sk_dst_lock); + spin_lock(&sk->sk_dst_lock); __ip6_dst_store(sk, dst, daddr, saddr); - write_unlock(&sk->sk_dst_lock); + spin_unlock(&sk->sk_dst_lock); } -static inline int ipv6_unicast_destination(struct sk_buff *skb) +static inline bool ipv6_unicast_destination(const struct sk_buff *skb) { - struct rt6_info *rt = (struct rt6_info *) skb->dst; + struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); return rt->rt6i_flags & RTF_LOCAL; } -#endif +static inline bool ipv6_anycast_destination(const struct sk_buff *skb) +{ + struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); + + return rt->rt6i_flags & RTF_ANYCAST; +} + +int ip6_fragment(struct sk_buff *skb, 
int (*output)(struct sk_buff *)); + +static inline int ip6_skb_dst_mtu(struct sk_buff *skb) +{ + struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; + + return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? + skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); +} + +static inline bool ip6_sk_accept_pmtu(const struct sock *sk) +{ + return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE && + inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; +} + +static inline bool ip6_sk_ignore_df(const struct sock *sk) +{ + return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || + inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; +} + +static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) +{ + return &rt->rt6i_gateway; +} + #endif diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 6512d85f11b..a5593dab6af 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -1,30 +1,55 @@ -/* - * $Id$ - */ - #ifndef _NET_IP6_TUNNEL_H #define _NET_IP6_TUNNEL_H #include <linux/ipv6.h> #include <linux/netdevice.h> +#include <linux/if_tunnel.h> #include <linux/ip6_tunnel.h> +#define IP6TUNNEL_ERR_TIMEO (30*HZ) + /* capable of sending packets */ #define IP6_TNL_F_CAP_XMIT 0x10000 /* capable of receiving packets */ #define IP6_TNL_F_CAP_RCV 0x20000 +/* determine capability on a per-packet basis */ +#define IP6_TNL_F_CAP_PER_PACKET 0x40000 -/* IPv6 tunnel */ +struct __ip6_tnl_parm { + char name[IFNAMSIZ]; /* name of tunnel device */ + int link; /* ifindex of underlying L2 interface */ + __u8 proto; /* tunnel protocol */ + __u8 encap_limit; /* encapsulation limit for tunnel */ + __u8 hop_limit; /* hop limit for tunnel */ + __be32 flowinfo; /* traffic class and flowlabel for tunnel */ + __u32 flags; /* tunnel flags */ + struct in6_addr laddr; /* local tunnel end-point address */ + struct in6_addr raddr; /* remote tunnel end-point address */ + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; +}; + +/* IPv6 tunnel */ struct ip6_tnl { - struct ip6_tnl *next; /* next tunnel in list */ + struct ip6_tnl __rcu *next; /* next tunnel in list */ struct net_device *dev; /* virtual device associated with tunnel */ - struct net_device_stats stat; /* statistics for tunnel device */ - int recursion; /* depth of hard_start_xmit recursion */ - struct ip6_tnl_parm parms; /* tunnel configuration parameters */ + struct net *net; /* netns for packet i/o */ + struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ struct dst_entry *dst_cache; /* cached dst */ u32 dst_cookie; + + int err_count; + unsigned long err_time; + + /* These fields used only by GRE */ + __u32 i_seqno; /* The last seen seqno */ + __u32 o_seqno; /* The last output seqno */ + int hlen; /* Precalculated GRE header length */ + int mlink; }; /* Tunnel encapsulation limit destination sub-option */ @@ -33,6 +58,35 @@ struct ipv6_tlv_tnl_enc_lim { __u8 type; /* type-code for option */ __u8 length; /* option length */ __u8 encap_limit; /* tunnel encapsulation limit */ -} __attribute__ ((packed)); +} __packed; + +struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); +void ip6_tnl_dst_reset(struct ip6_tnl *t); +void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); +int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, + const struct in6_addr *raddr); +int ip6_tnl_xmit_ctl(struct ip6_tnl *t); +__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); +__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, + const struct 
in6_addr *raddr); + +static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + int pkt_len, err; + + pkt_len = skb->len; + err = ip6_local_out(skb); + if (net_xmit_eval(err) == 0) { + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes += pkt_len; + tstats->tx_packets++; + u64_stats_update_end(&tstats->syncp); + } else { + stats->tx_errors++; + stats->tx_aborted_errors++; + } +} #endif diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 8b12667f7a2..9922093f575 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -18,7 +18,10 @@ #include <net/flow.h> #include <linux/seq_file.h> +#include <linux/rcupdate.h> #include <net/fib_rules.h> +#include <net/inetpeer.h> +#include <linux/percpu.h> struct fib_config { u8 fc_dst_len; @@ -44,22 +47,47 @@ struct fib_config { }; struct fib_info; +struct rtable; + +struct fib_nh_exception { + struct fib_nh_exception __rcu *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + __be32 fnhe_gw; + unsigned long fnhe_expires; + struct rtable __rcu *fnhe_rth_input; + struct rtable __rcu *fnhe_rth_output; + unsigned long fnhe_stamp; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception __rcu *chain; +}; + +#define FNHE_HASH_SIZE 2048 +#define FNHE_RECLAIM_DEPTH 5 struct fib_nh { struct net_device *nh_dev; struct hlist_node nh_hash; struct fib_info *nh_parent; - unsigned nh_flags; + unsigned int nh_flags; unsigned char nh_scope; #ifdef CONFIG_IP_ROUTE_MULTIPATH int nh_weight; int nh_power; #endif -#ifdef CONFIG_NET_CLS_ROUTE +#ifdef CONFIG_IP_ROUTE_CLASSID __u32 nh_tclassid; #endif int nh_oif; __be32 nh_gw; + __be32 nh_saddr; + int nh_saddr_genid; + struct rtable __rcu * __percpu *nh_pcpu_rth_output; + struct rtable __rcu *nh_rth_input; + struct fnhe_hash_bucket *nh_exceptions; }; /* @@ -72,12 +100,14 @@ struct fib_info { struct net *fib_net; int fib_treeref; atomic_t fib_clntref; - int fib_dead; - unsigned fib_flags; - int fib_protocol; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; __be32 fib_prefsrc; u32 fib_priority; - u32 fib_metrics[RTAX_MAX]; + u32 *fib_metrics; #define fib_mtu fib_metrics[RTAX_MTU-1] #define fib_window fib_metrics[RTAX_WINDOW-1] #define fib_rtt fib_metrics[RTAX_RTT-1] @@ -86,6 +116,7 @@ struct fib_info { #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_power; #endif + struct rcu_head rcu; struct fib_nh fib_nh[0]; #define fib_dev fib_nh[0].nh_dev }; @@ -95,15 +126,16 @@ struct fib_info { struct fib_rule; #endif +struct fib_table; struct fib_result { unsigned char prefixlen; unsigned char nh_sel; unsigned char type; unsigned char scope; + u32 tclassid; struct fib_info *fi; -#ifdef CONFIG_IP_MULTIPLE_TABLES - struct fib_rule *r; -#endif + struct fib_table *table; + struct list_head *fa_head; }; struct fib_result_nl { @@ -122,43 +154,50 @@ struct fib_result_nl { }; #ifdef CONFIG_IP_ROUTE_MULTIPATH - #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) -#define FIB_RES_RESET(res) ((res).nh_sel = 0) - -#define FIB_TABLE_HASHSZ 2 - #else /* CONFIG_IP_ROUTE_MULTIPATH */ - #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) -#define FIB_RES_RESET(res) +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ +#ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 +#else +#define FIB_TABLE_HASHSZ 2 +#endif -#endif /* CONFIG_IP_ROUTE_MULTIPATH */ +__be32 fib_info_update_nh_saddr(struct net *net, struct 
fib_nh *nh); -#define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res)) +#define FIB_RES_SADDR(net, res) \ + ((FIB_RES_NH(res).nh_saddr_genid == \ + atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ + FIB_RES_NH(res).nh_saddr : \ + fib_info_update_nh_saddr((net), &FIB_RES_NH(res))) #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) #define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) #define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) +#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \ + FIB_RES_SADDR(net, res)) + struct fib_table { - struct hlist_node tb_hlist; - u32 tb_id; - unsigned tb_stamp; - int tb_default; - int (*tb_lookup)(struct fib_table *tb, const struct flowi *flp, struct fib_result *res); - int (*tb_insert)(struct fib_table *, struct fib_config *); - int (*tb_delete)(struct fib_table *, struct fib_config *); - int (*tb_dump)(struct fib_table *table, struct sk_buff *skb, - struct netlink_callback *cb); - int (*tb_flush)(struct fib_table *table); - void (*tb_select_default)(struct fib_table *table, - const struct flowi *flp, struct fib_result *res); - - unsigned char tb_data[0]; + struct hlist_node tb_hlist; + u32 tb_id; + int tb_default; + int tb_num_default; + unsigned long tb_data[0]; }; +int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, + struct fib_result *res, int fib_flags); +int fib_table_insert(struct fib_table *, struct fib_config *); +int fib_table_delete(struct fib_table *, struct fib_config *); +int fib_table_dump(struct fib_table *table, struct sk_buff *skb, + struct netlink_callback *cb); +int fib_table_flush(struct fib_table *table); +void fib_free_table(struct fib_table *tb); + + + #ifndef CONFIG_IP_MULTIPLE_TABLES #define TABLE_LOCAL_INDEX 0 @@ -179,65 +218,94 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id) return fib_get_table(net, id); } -static inline int fib_lookup(struct net *net, const struct flowi *flp, +static inline int fib_lookup(struct net *net, const struct flowi4 *flp, struct fib_result *res) { struct fib_table *table; table = fib_get_table(net, RT_TABLE_LOCAL); - if (!table->tb_lookup(table, flp, res)) + if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF)) return 0; table = fib_get_table(net, RT_TABLE_MAIN); - if (!table->tb_lookup(table, flp, res)) + if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF)) return 0; return -ENETUNREACH; } #else /* CONFIG_IP_MULTIPLE_TABLES */ -extern int __net_init fib4_rules_init(struct net *net); -extern void __net_exit fib4_rules_exit(struct net *net); +int __net_init fib4_rules_init(struct net *net); +void __net_exit fib4_rules_exit(struct net *net); -#ifdef CONFIG_NET_CLS_ROUTE -extern u32 fib_rules_tclass(struct fib_result *res); -#endif +struct fib_table *fib_new_table(struct net *net, u32 id); +struct fib_table *fib_get_table(struct net *net, u32 id); -extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res); +int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res); -extern struct fib_table *fib_new_table(struct net *net, u32 id); -extern struct fib_table *fib_get_table(struct net *net, u32 id); +static inline int fib_lookup(struct net *net, struct flowi4 *flp, + struct fib_result *res) +{ + if (!net->ipv4.fib_has_custom_rules) { + res->tclassid = 0; + if (net->ipv4.fib_local && + !fib_table_lookup(net->ipv4.fib_local, flp, res, + FIB_LOOKUP_NOREF)) + return 0; + if (net->ipv4.fib_main && + !fib_table_lookup(net->ipv4.fib_main, flp, res, + FIB_LOOKUP_NOREF)) + return 0; + if 
(net->ipv4.fib_default && + !fib_table_lookup(net->ipv4.fib_default, flp, res, + FIB_LOOKUP_NOREF)) + return 0; + return -ENETUNREACH; + } + return __fib_lookup(net, flp, res); +} #endif /* CONFIG_IP_MULTIPLE_TABLES */ /* Exported by fib_frontend.c */ extern const struct nla_policy rtm_ipv4_policy[]; -extern void ip_fib_init(void); -extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, - struct net_device *dev, __be32 *spec_dst, u32 *itag); -extern void fib_select_default(struct net *net, const struct flowi *flp, - struct fib_result *res); +void ip_fib_init(void); +__be32 fib_compute_spec_dst(struct sk_buff *skb); +int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, + u8 tos, int oif, struct net_device *dev, + struct in_device *idev, u32 *itag); +void fib_select_default(struct fib_result *res); +#ifdef CONFIG_IP_ROUTE_CLASSID +static inline int fib_num_tclassid_users(struct net *net) +{ + return net->ipv4.fib_num_tclassid_users; +} +#else +static inline int fib_num_tclassid_users(struct net *net) +{ + return 0; +} +#endif /* Exported by fib_semantics.c */ -extern int ip_fib_check_default(__be32 gw, struct net_device *dev); -extern int fib_sync_down_dev(struct net_device *dev, int force); -extern int fib_sync_down_addr(struct net *net, __be32 local); -extern int fib_sync_up(struct net_device *dev); -extern __be32 __fib_res_prefsrc(struct fib_result *res); -extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res); - -/* Exported by fib_{hash|trie}.c */ -extern void fib_hash_init(void); -extern struct fib_table *fib_hash_table(u32 id); - -static inline void fib_combine_itag(u32 *itag, struct fib_result *res) +int ip_fib_check_default(__be32 gw, struct net_device *dev); +int fib_sync_down_dev(struct net_device *dev, int force); +int fib_sync_down_addr(struct net *net, __be32 local); +int fib_sync_up(struct net_device *dev); +void fib_select_multipath(struct fib_result *res); + +/* Exported by fib_trie.c */ +void fib_trie_init(void); +struct fib_table *fib_trie_table(u32 id); + +static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) { -#ifdef CONFIG_NET_CLS_ROUTE +#ifdef CONFIG_IP_ROUTE_CLASSID #ifdef CONFIG_IP_MULTIPLE_TABLES u32 rtag; #endif *itag = FIB_RES_NH(*res).nh_tclassid<<16; #ifdef CONFIG_IP_MULTIPLE_TABLES - rtag = fib_rules_tclass(res); + rtag = res->tclassid; if (*itag == 0) *itag = (rtag<<16); *itag |= (rtag>>16); @@ -245,7 +313,7 @@ static inline void fib_combine_itag(u32 *itag, struct fib_result *res) #endif } -extern void free_fib_info(struct fib_info *fi); +void free_fib_info(struct fib_info *fi); static inline void fib_info_put(struct fib_info *fi) { @@ -253,19 +321,9 @@ static inline void fib_info_put(struct fib_info *fi) free_fib_info(fi); } -static inline void fib_res_put(struct fib_result *res) -{ - if (res->fi) - fib_info_put(res->fi); -#ifdef CONFIG_IP_MULTIPLE_TABLES - if (res->r) - fib_rule_put(res->r); -#endif -} - #ifdef CONFIG_PROC_FS -extern int __net_init fib_proc_init(struct net *net); -extern void __net_exit fib_proc_exit(struct net *net); +int __net_init fib_proc_init(struct net *net); +void __net_exit fib_proc_exit(struct net *net); #else static inline int fib_proc_init(struct net *net) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h new file mode 100644 index 00000000000..a4daf9eb856 --- /dev/null +++ b/include/net/ip_tunnels.h @@ -0,0 +1,184 @@ +#ifndef __NET_IP_TUNNELS_H +#define __NET_IP_TUNNELS_H 1 + +#include <linux/if_tunnel.h> +#include 
<linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> +#include <net/dsfield.h> +#include <net/gro_cells.h> +#include <net/inet_ecn.h> +#include <net/ip.h> +#include <net/rtnetlink.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ipv6.h> +#include <net/ip6_fib.h> +#include <net/ip6_route.h> +#endif + +/* Keep error state on tunnel for 30 sec */ +#define IPTUNNEL_ERR_TIMEO (30*HZ) + +/* 6rd prefix/relay information */ +#ifdef CONFIG_IPV6_SIT_6RD +struct ip_tunnel_6rd_parm { + struct in6_addr prefix; + __be32 relay_prefix; + u16 prefixlen; + u16 relay_prefixlen; +}; +#endif + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry __rcu *next; + __be32 addr; + u16 flags; + struct rcu_head rcu_head; +}; + +struct ip_tunnel_dst { + struct dst_entry __rcu *dst; +}; + +struct ip_tunnel { + struct ip_tunnel __rcu *next; + struct hlist_node hash_node; + struct net_device *dev; + struct net *net; /* netns for packet i/o */ + + int err_count; /* Number of arrived ICMP errors */ + unsigned long err_time; /* Time when the last ICMP error + * arrived */ + + /* These four fields used only by GRE */ + __u32 i_seqno; /* The last seen seqno */ + __u32 o_seqno; /* The last output seqno */ + int hlen; /* Precalculated header length */ + int mlink; + + struct ip_tunnel_dst __percpu *dst_cache; + + struct ip_tunnel_parm parms; + + /* for SIT */ +#ifdef CONFIG_IPV6_SIT_6RD + struct ip_tunnel_6rd_parm ip6rd; +#endif + struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */ + unsigned int prl_count; /* # of entries in PRL */ + int ip_tnl_net_id; + struct gro_cells gro_cells; +}; + +#define TUNNEL_CSUM __cpu_to_be16(0x01) +#define TUNNEL_ROUTING __cpu_to_be16(0x02) +#define TUNNEL_KEY __cpu_to_be16(0x04) +#define TUNNEL_SEQ __cpu_to_be16(0x08) +#define TUNNEL_STRICT __cpu_to_be16(0x10) +#define TUNNEL_REC __cpu_to_be16(0x20) +#define TUNNEL_VERSION __cpu_to_be16(0x40) +#define TUNNEL_NO_KEY __cpu_to_be16(0x80) +#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) + +struct tnl_ptk_info { + __be16 flags; + __be16 proto; + __be32 key; + __be32 seq; +}; + +#define PACKET_RCVD 0 +#define PACKET_REJECT 1 + +#define IP_TNL_HASH_BITS 7 +#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS) + +struct ip_tunnel_net { + struct net_device *fb_tunnel_dev; + struct hlist_head tunnels[IP_TNL_HASH_SIZE]; +}; + +#ifdef CONFIG_INET + +int ip_tunnel_init(struct net_device *dev); +void ip_tunnel_uninit(struct net_device *dev); +void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); +int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, + struct rtnl_link_ops *ops, char *devname); + +void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops); + +void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + const struct iphdr *tnl_params, const u8 protocol); +int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); + +struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot); +struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, + int link, __be16 flags, + __be32 remote, __be32 local, + __be32 key); + +int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, + const struct tnl_ptk_info *tpi, bool log_ecn_error); +int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], + struct ip_tunnel_parm *p); +int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], + 
struct ip_tunnel_parm *p); +void ip_tunnel_setup(struct net_device *dev, int net_id); +void ip_tunnel_dst_reset_all(struct ip_tunnel *t); + +/* Extract dsfield from inner protocol */ +static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, + const struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) + return iph->tos; + else if (skb->protocol == htons(ETH_P_IPV6)) + return ipv6_get_dsfield((const struct ipv6hdr *)iph); + else + return 0; +} + +/* Propogate ECN bits out */ +static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, + const struct sk_buff *skb) +{ + u8 inner = ip_tunnel_get_dsfield(iph, skb); + + return INET_ECN_encapsulate(tos, inner); +} + +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); +int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 proto, + __u8 tos, __u8 ttl, __be16 df, bool xnet); + +struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, + int gso_type_mask); + +static inline void iptunnel_xmit_stats(int err, + struct net_device_stats *err_stats, + struct pcpu_sw_netstats __percpu *stats) +{ + if (err > 0) { + struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats); + + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes += err; + tstats->tx_packets++; + u64_stats_update_end(&tstats->syncp); + } else if (err < 0) { + err_stats->tx_errors++; + err_stats->tx_aborted_errors++; + } else { + err_stats->tx_dropped++; + } +} + +#endif /* CONFIG_INET */ + +#endif /* __NET_IP_TUNNELS_H */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 56f3c94ae62..624a8a54806 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -3,331 +3,313 @@ * data structure and functionality definitions */ -#ifndef _IP_VS_H -#define _IP_VS_H +#ifndef _NET_IP_VS_H +#define _NET_IP_VS_H -#include <asm/types.h> /* For __uXX types */ -#include <linux/types.h> /* For __beXX types in userland */ +#include <linux/ip_vs.h> /* definitions shared with userland */ -#include <linux/sysctl.h> /* For ctl_path */ +#include <asm/types.h> /* for __uXX types */ -#define IP_VS_VERSION_CODE 0x010201 -#define NVERSION(version) \ - (version >> 16) & 0xFF, \ - (version >> 8) & 0xFF, \ - version & 0xFF - -/* - * Virtual Service Flags - */ -#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ -#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ - -/* - * Destination Server Flags - */ -#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ -#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ +#include <linux/list.h> /* for struct list_head */ +#include <linux/spinlock.h> /* for struct rwlock_t */ +#include <linux/atomic.h> /* for struct atomic_t */ +#include <linux/compiler.h> +#include <linux/timer.h> +#include <linux/bug.h> -/* - * IPVS sync daemon states - */ -#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ -#define IP_VS_STATE_MASTER 0x0001 /* started as master */ -#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ +#include <net/checksum.h> +#include <linux/netfilter.h> /* for union nf_inet_addr */ +#include <linux/ip.h> +#include <linux/ipv6.h> /* for struct ipv6hdr */ +#include <net/ipv6.h> +#if IS_ENABLED(CONFIG_IP_VS_IPV6) +#include <linux/netfilter_ipv6/ip6_tables.h> +#endif +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <net/netfilter/nf_conntrack.h> +#endif +#include <net/net_namespace.h> /* Netw namespace */ /* - * IPVS socket options + * Generic access of ipvs struct */ -#define IP_VS_BASE_CTL (64+1024+64) 
/* base */ - -#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ -#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) -#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) -#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) -#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) -#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) -#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) -#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) -#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) -#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) -#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) -#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) -#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) -#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) -#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) -#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) -#define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO - -#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL -#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) -#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) -#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) -#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) -#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ -#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) -#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) -#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON - - +static inline struct netns_ipvs *net_ipvs(struct net* net) +{ + return net->ipvs; +} /* - * IPVS Connection Flags + * Get net ptr from skb in traffic cases + * use skb_sknet when call is from userland (ioctl or netlink) */ -#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ -#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ -#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ -#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ -#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ -#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ -#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ -#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ -#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ -#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ -#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ -#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ -#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ -#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ -#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ - -/* Move it to better place one day, for now keep it unique */ -#define NFC_IPVS_PROPERTY 0x10000 - -#define IP_VS_SCHEDNAME_MAXLEN 16 -#define IP_VS_IFNAME_MAXLEN 16 - +static inline struct net *skb_net(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_NS +#ifdef CONFIG_IP_VS_DEBUG + /* + * This is used for debug only. + * Start with the most likely hit + * End with BUG + */ + if (likely(skb->dev && skb->dev->nd_net)) + return dev_net(skb->dev); + if (skb_dst(skb) && skb_dst(skb)->dev) + return dev_net(skb_dst(skb)->dev); + WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", + __func__, __LINE__); + if (likely(skb->sk && skb->sk->sk_net)) + return sock_net(skb->sk); + pr_err("There is no net ptr to find in the skb in %s() line:%d\n", + __func__, __LINE__); + BUG(); +#else + return dev_net(skb->dev ? 
: skb_dst(skb)->dev); +#endif +#else + return &init_net; +#endif +} +static inline struct net *skb_sknet(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_NS +#ifdef CONFIG_IP_VS_DEBUG + /* Start with the most likely hit */ + if (likely(skb->sk && skb->sk->sk_net)) + return sock_net(skb->sk); + WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n", + __func__, __LINE__); + if (likely(skb->dev && skb->dev->nd_net)) + return dev_net(skb->dev); + pr_err("There is no net ptr to find in the skb in %s() line:%d\n", + __func__, __LINE__); + BUG(); +#else + return sock_net(skb->sk); +#endif +#else + return &init_net; +#endif +} /* - * The struct ip_vs_service_user and struct ip_vs_dest_user are - * used to set IPVS rules through setsockopt. + * This one needed for single_open_net since net is stored directly in + * private not as a struct i.e. seq_file_net can't be used. */ -struct ip_vs_service_user { - /* virtual service addresses */ - u_int16_t protocol; - __be32 addr; /* virtual ip address */ - __be16 port; - u_int32_t fwmark; /* firwall mark of service */ +static inline struct net *seq_file_single_net(struct seq_file *seq) +{ +#ifdef CONFIG_NET_NS + return (struct net *)seq->private; +#else + return &init_net; +#endif +} - /* virtual service options */ - char sched_name[IP_VS_SCHEDNAME_MAXLEN]; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout in sec */ - __be32 netmask; /* persistent netmask */ +/* Connections' size value needed by ip_vs_ctl.c */ +extern int ip_vs_conn_tab_size; + +struct ip_vs_iphdr { + __u32 len; /* IPv4 simply where L4 starts + IPv6 where L4 Transport Header starts */ + __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ + __s16 protocol; + __s32 flags; + union nf_inet_addr saddr; + union nf_inet_addr daddr; }; +static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset, + int len, void *buffer, + const struct ip_vs_iphdr *ipvsh) +{ + return skb_header_pointer(skb, offset, len, buffer); +} -struct ip_vs_dest_user { - /* destination server address */ - __be32 addr; - __be16 port; - - /* real server options */ - unsigned conn_flags; /* connection flags */ - int weight; /* destination weight */ - - /* thresholds for active connections */ - u_int32_t u_threshold; /* upper threshold */ - u_int32_t l_threshold; /* lower threshold */ -}; +static inline void +ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr) +{ + const struct iphdr *iph = nh; + iphdr->len = iph->ihl * 4; + iphdr->fragoffs = 0; + iphdr->protocol = iph->protocol; + iphdr->saddr.ip = iph->saddr; + iphdr->daddr.ip = iph->daddr; +} -/* - * IPVS statistics object (for user space) +/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6. + * IPv6 requires some extra work, as finding proper header position, + * depend on the IPv6 extension headers. 
*/ -struct ip_vs_stats_user +static inline void +ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr) { - __u32 conns; /* connections scheduled */ - __u32 inpkts; /* incoming packets */ - __u32 outpkts; /* outgoing packets */ - __u64 inbytes; /* incoming bytes */ - __u64 outbytes; /* outgoing bytes */ - - __u32 cps; /* current connection rate */ - __u32 inpps; /* current in packet rate */ - __u32 outpps; /* current out packet rate */ - __u32 inbps; /* current in byte rate */ - __u32 outbps; /* current out byte rate */ -}; - - -/* The argument to IP_VS_SO_GET_INFO */ -struct ip_vs_getinfo { - /* version number */ - unsigned int version; - - /* size of connection hash table */ - unsigned int size; - - /* number of virtual services */ - unsigned int num_services; -}; - - -/* The argument to IP_VS_SO_GET_SERVICE */ -struct ip_vs_service_entry { - /* which service: user fills in these */ - u_int16_t protocol; - __be32 addr; /* virtual address */ - __be16 port; - u_int32_t fwmark; /* firwall mark of service */ - - /* service options */ - char sched_name[IP_VS_SCHEDNAME_MAXLEN]; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout */ - __be32 netmask; /* persistent netmask */ - - /* number of real servers */ - unsigned int num_dests; - - /* statistics */ - struct ip_vs_stats_user stats; -}; - - -struct ip_vs_dest_entry { - __be32 addr; /* destination address */ - __be16 port; - unsigned conn_flags; /* connection flags */ - int weight; /* destination weight */ - - u_int32_t u_threshold; /* upper threshold */ - u_int32_t l_threshold; /* lower threshold */ - - u_int32_t activeconns; /* active connections */ - u_int32_t inactconns; /* inactive connections */ - u_int32_t persistconns; /* persistent connections */ - - /* statistics */ - struct ip_vs_stats_user stats; -}; - - -/* The argument to IP_VS_SO_GET_DESTS */ -struct ip_vs_get_dests { - /* which service: user fills in these */ - u_int16_t protocol; - __be32 addr; /* virtual address */ - __be16 port; - u_int32_t fwmark; /* firwall mark of service */ - - /* number of real servers */ - unsigned int num_dests; - - /* the real servers */ - struct ip_vs_dest_entry entrytable[0]; -}; - - -/* The argument to IP_VS_SO_GET_SERVICES */ -struct ip_vs_get_services { - /* number of virtual services */ - unsigned int num_services; - - /* service table */ - struct ip_vs_service_entry entrytable[0]; -}; - - -/* The argument to IP_VS_SO_GET_TIMEOUT */ -struct ip_vs_timeout_user { - int tcp_timeout; - int tcp_fin_timeout; - int udp_timeout; -}; - - -/* The argument to IP_VS_SO_GET_DAEMON */ -struct ip_vs_daemon_user { - /* sync daemon state (master/backup) */ - int state; +#ifdef CONFIG_IP_VS_IPV6 + if (af == AF_INET6) { + const struct ipv6hdr *iph = + (struct ipv6hdr *)skb_network_header(skb); + iphdr->saddr.in6 = iph->saddr; + iphdr->daddr.in6 = iph->daddr; + /* ipv6_find_hdr() updates len, flags */ + iphdr->len = 0; + iphdr->flags = 0; + iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1, + &iphdr->fragoffs, + &iphdr->flags); + } else +#endif + { + const struct iphdr *iph = + (struct iphdr *)skb_network_header(skb); + iphdr->len = iph->ihl * 4; + iphdr->fragoffs = 0; + iphdr->protocol = iph->protocol; + iphdr->saddr.ip = iph->saddr; + iphdr->daddr.ip = iph->daddr; + } +} - /* multicast interface name */ - char mcast_ifn[IP_VS_IFNAME_MAXLEN]; +static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, + const union nf_inet_addr *src) +{ +#ifdef CONFIG_IP_VS_IPV6 + if (af == AF_INET6) + 
dst->in6 = src->in6; + else +#endif + dst->ip = src->ip; +} - /* SyncID we belong to */ - int syncid; -}; +static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst, + const union nf_inet_addr *src) +{ +#ifdef CONFIG_IP_VS_IPV6 + if (af == AF_INET6) { + dst->in6 = src->in6; + return; + } +#endif + dst->ip = src->ip; + dst->all[1] = 0; + dst->all[2] = 0; + dst->all[3] = 0; +} +static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a, + const union nf_inet_addr *b) +{ +#ifdef CONFIG_IP_VS_IPV6 + if (af == AF_INET6) + return ipv6_addr_equal(&a->in6, &b->in6); +#endif + return a->ip == b->ip; +} -#ifdef __KERNEL__ +#ifdef CONFIG_IP_VS_DEBUG +#include <linux/net.h> -#include <linux/list.h> /* for struct list_head */ -#include <linux/spinlock.h> /* for struct rwlock_t */ -#include <asm/atomic.h> /* for struct atomic_t */ -#include <linux/compiler.h> -#include <linux/timer.h> +int ip_vs_get_debug_level(void); -#include <net/checksum.h> +static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, + const union nf_inet_addr *addr, + int *idx) +{ + int len; +#ifdef CONFIG_IP_VS_IPV6 + if (af == AF_INET6) + len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]", + &addr->in6) + 1; + else +#endif + len = snprintf(&buf[*idx], buf_len - *idx, "%pI4", + &addr->ip) + 1; -#ifdef CONFIG_IP_VS_DEBUG -#include <linux/net.h> + *idx += len; + BUG_ON(*idx > buf_len + 1); + return &buf[*idx - len]; +} -extern int ip_vs_get_debug_level(void); -#define IP_VS_DBG(level, msg...) \ - do { \ - if (level <= ip_vs_get_debug_level()) \ - printk(KERN_DEBUG "IPVS: " msg); \ - } while (0) -#define IP_VS_DBG_RL(msg...) \ - do { \ - if (net_ratelimit()) \ - printk(KERN_DEBUG "IPVS: " msg); \ - } while (0) -#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) \ - do { \ - if (level <= ip_vs_get_debug_level()) \ - pp->debug_packet(pp, skb, ofs, msg); \ - } while (0) -#define IP_VS_DBG_RL_PKT(level, pp, skb, ofs, msg) \ - do { \ - if (level <= ip_vs_get_debug_level() && \ - net_ratelimit()) \ - pp->debug_packet(pp, skb, ofs, msg); \ - } while (0) +#define IP_VS_DBG_BUF(level, msg, ...) \ + do { \ + char ip_vs_dbg_buf[160]; \ + int ip_vs_dbg_idx = 0; \ + if (level <= ip_vs_get_debug_level()) \ + printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ + } while (0) +#define IP_VS_ERR_BUF(msg...) \ + do { \ + char ip_vs_dbg_buf[160]; \ + int ip_vs_dbg_idx = 0; \ + pr_err(msg); \ + } while (0) + +/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */ +#define IP_VS_DBG_ADDR(af, addr) \ + ip_vs_dbg_addr(af, ip_vs_dbg_buf, \ + sizeof(ip_vs_dbg_buf), addr, \ + &ip_vs_dbg_idx) + +#define IP_VS_DBG(level, msg, ...) \ + do { \ + if (level <= ip_vs_get_debug_level()) \ + printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ + } while (0) +#define IP_VS_DBG_RL(msg, ...) \ + do { \ + if (net_ratelimit()) \ + printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ + } while (0) +#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) \ + do { \ + if (level <= ip_vs_get_debug_level()) \ + pp->debug_packet(af, pp, skb, ofs, msg); \ + } while (0) +#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) \ + do { \ + if (level <= ip_vs_get_debug_level() && \ + net_ratelimit()) \ + pp->debug_packet(af, pp, skb, ofs, msg); \ + } while (0) #else /* NO DEBUGGING at ALL */ +#define IP_VS_DBG_BUF(level, msg...) do {} while (0) +#define IP_VS_ERR_BUF(msg...) do {} while (0) #define IP_VS_DBG(level, msg...) do {} while (0) #define IP_VS_DBG_RL(msg...) 
do {} while (0) -#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0) -#define IP_VS_DBG_RL_PKT(level, pp, skb, ofs, msg) do {} while (0) +#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) do {} while (0) +#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) do {} while (0) #endif #define IP_VS_BUG() BUG() -#define IP_VS_ERR(msg...) printk(KERN_ERR "IPVS: " msg) -#define IP_VS_INFO(msg...) printk(KERN_INFO "IPVS: " msg) -#define IP_VS_WARNING(msg...) \ - printk(KERN_WARNING "IPVS: " msg) -#define IP_VS_ERR_RL(msg...) \ - do { \ - if (net_ratelimit()) \ - printk(KERN_ERR "IPVS: " msg); \ - } while (0) +#define IP_VS_ERR_RL(msg, ...) \ + do { \ + if (net_ratelimit()) \ + pr_err(msg, ##__VA_ARGS__); \ + } while (0) #ifdef CONFIG_IP_VS_DEBUG #define EnterFunction(level) \ - do { \ - if (level <= ip_vs_get_debug_level()) \ - printk(KERN_DEBUG "Enter: %s, %s line %i\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - } while (0) -#define LeaveFunction(level) \ - do { \ - if (level <= ip_vs_get_debug_level()) \ - printk(KERN_DEBUG "Leave: %s, %s line %i\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - } while (0) + do { \ + if (level <= ip_vs_get_debug_level()) \ + printk(KERN_DEBUG \ + pr_fmt("Enter: %s, %s line %i\n"), \ + __func__, __FILE__, __LINE__); \ + } while (0) +#define LeaveFunction(level) \ + do { \ + if (level <= ip_vs_get_debug_level()) \ + printk(KERN_DEBUG \ + pr_fmt("Leave: %s, %s line %i\n"), \ + __func__, __FILE__, __LINE__); \ + } while (0) #else #define EnterFunction(level) do {} while (0) #define LeaveFunction(level) do {} while (0) #endif -#define IP_VS_WAIT_WHILE(expr) while (expr) { cpu_relax(); } - /* * The port number of FTP service (in network order). */ -#define FTPPORT __constant_htons(21) -#define FTPDATA __constant_htons(20) +#define FTPPORT cpu_to_be16(21) +#define FTPDATA cpu_to_be16(20) /* * TCP State Values @@ -364,6 +346,27 @@ enum { }; /* + * SCTP State Values + */ +enum ip_vs_sctp_states { + IP_VS_SCTP_S_NONE, + IP_VS_SCTP_S_INIT1, + IP_VS_SCTP_S_INIT, + IP_VS_SCTP_S_COOKIE_SENT, + IP_VS_SCTP_S_COOKIE_REPLIED, + IP_VS_SCTP_S_COOKIE_WAIT, + IP_VS_SCTP_S_COOKIE, + IP_VS_SCTP_S_COOKIE_ECHOED, + IP_VS_SCTP_S_ESTABLISHED, + IP_VS_SCTP_S_SHUTDOWN_SENT, + IP_VS_SCTP_S_SHUTDOWN_RECEIVED, + IP_VS_SCTP_S_SHUTDOWN_ACK_SENT, + IP_VS_SCTP_S_REJECTED, + IP_VS_SCTP_S_CLOSED, + IP_VS_SCTP_S_LAST +}; + +/* * Delta sequence info structure * Each ip_vs_conn has 2 (output AND input seq. changes). * Only used in the VS/NAT. 
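
The reworked IP_VS_DBG_BUF()/IP_VS_DBG_ADDR() pair above works because the outer macro opens a block that declares ip_vs_dbg_buf[] and ip_vs_dbg_idx, and ip_vs_dbg_addr() then formats each address into that caller-owned buffer and returns a pointer into it. A minimal user-space sketch of the same pattern follows; the DBG_* names and the IPv4-only formatting are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Format one address into the shared buffer at *idx and return where it
 * starts (mirrors ip_vs_dbg_addr()). */
static const char *dbg_addr(char *buf, size_t buf_len,
                            const struct in_addr *a, int *idx)
{
        int len = snprintf(&buf[*idx], buf_len - *idx, "%s", inet_ntoa(*a)) + 1;

        *idx += len;
        return &buf[*idx - len];
}

/* Buffer and index live only for this one statement (mirrors IP_VS_DBG_BUF()). */
#define DBG_BUF(msg, ...)                                \
        do {                                             \
                char dbg_buf[160];                       \
                int dbg_idx = 0;                         \
                printf(msg, ##__VA_ARGS__);              \
        } while (0)

/* Only valid inside DBG_BUF(), like IP_VS_DBG_ADDR(). */
#define DBG_ADDR(a) dbg_addr(dbg_buf, sizeof(dbg_buf), (a), &dbg_idx)

int main(void)
{
        struct in_addr caddr = { .s_addr = htonl(0xc0a80001) }; /* 192.168.0.1 */
        struct in_addr vaddr = { .s_addr = htonl(0x0a000001) }; /* 10.0.0.1 */

        DBG_BUF("conn %s:%d -> %s:%d\n",
                DBG_ADDR(&caddr), 12345, DBG_ADDR(&vaddr), 80);
        return 0;
}
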
@@ -375,25 +378,49 @@ struct ip_vs_seq { before last resized pkt */ }; - /* - * IPVS statistics object + * counters per cpu */ -struct ip_vs_stats -{ - __u32 conns; /* connections scheduled */ - __u32 inpkts; /* incoming packets */ - __u32 outpkts; /* outgoing packets */ - __u64 inbytes; /* incoming bytes */ - __u64 outbytes; /* outgoing bytes */ +struct ip_vs_counters { + __u32 conns; /* connections scheduled */ + __u32 inpkts; /* incoming packets */ + __u32 outpkts; /* outgoing packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outbytes; /* outgoing bytes */ +}; +/* + * Stats per cpu + */ +struct ip_vs_cpu_stats { + struct ip_vs_counters ustats; + struct u64_stats_sync syncp; +}; - __u32 cps; /* current connection rate */ - __u32 inpps; /* current in packet rate */ - __u32 outpps; /* current out packet rate */ - __u32 inbps; /* current in byte rate */ - __u32 outbps; /* current out byte rate */ +/* + * IPVS statistics objects + */ +struct ip_vs_estimator { + struct list_head list; + + u64 last_inbytes; + u64 last_outbytes; + u32 last_conns; + u32 last_inpkts; + u32 last_outpkts; + + u32 cps; + u32 inpps; + u32 outpps; + u32 inbps; + u32 outbps; +}; - spinlock_t lock; /* spin lock */ +struct ip_vs_stats { + struct ip_vs_stats_user ustats; /* statistics */ + struct ip_vs_estimator est; /* estimator */ + struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */ + spinlock_t lock; /* spin lock */ + struct ip_vs_stats_user ustats0; /* reset values */ }; struct dst_entry; @@ -401,83 +428,116 @@ struct iphdr; struct ip_vs_conn; struct ip_vs_app; struct sk_buff; +struct ip_vs_proto_data; struct ip_vs_protocol { struct ip_vs_protocol *next; char *name; - __u16 protocol; + u16 protocol; + u16 num_states; int dont_defrag; - atomic_t appcnt; /* counter of proto app incs */ - int *timeout_table; /* protocol timeout table */ void (*init)(struct ip_vs_protocol *pp); void (*exit)(struct ip_vs_protocol *pp); - int (*conn_schedule)(struct sk_buff *skb, - struct ip_vs_protocol *pp, - int *verdict, struct ip_vs_conn **cpp); + int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); + + void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); + + int (*conn_schedule)(int af, struct sk_buff *skb, + struct ip_vs_proto_data *pd, + int *verdict, struct ip_vs_conn **cpp, + struct ip_vs_iphdr *iph); struct ip_vs_conn * - (*conn_in_get)(const struct sk_buff *skb, - struct ip_vs_protocol *pp, - const struct iphdr *iph, - unsigned int proto_off, + (*conn_in_get)(int af, + const struct sk_buff *skb, + const struct ip_vs_iphdr *iph, int inverse); struct ip_vs_conn * - (*conn_out_get)(const struct sk_buff *skb, - struct ip_vs_protocol *pp, - const struct iphdr *iph, - unsigned int proto_off, + (*conn_out_get)(int af, + const struct sk_buff *skb, + const struct ip_vs_iphdr *iph, int inverse); - int (*snat_handler)(struct sk_buff *skb, - struct ip_vs_protocol *pp, struct ip_vs_conn *cp); + int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); - int (*dnat_handler)(struct sk_buff *skb, - struct ip_vs_protocol *pp, struct ip_vs_conn *cp); + int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); - int (*csum_check)(struct sk_buff *skb, struct ip_vs_protocol *pp); + int (*csum_check)(int af, struct sk_buff *skb, + struct ip_vs_protocol *pp); const char *(*state_name)(int state); - int (*state_transition)(struct ip_vs_conn *cp, int direction, - const struct sk_buff 
*skb, - struct ip_vs_protocol *pp); + void (*state_transition)(struct ip_vs_conn *cp, int direction, + const struct sk_buff *skb, + struct ip_vs_proto_data *pd); - int (*register_app)(struct ip_vs_app *inc); + int (*register_app)(struct net *net, struct ip_vs_app *inc); - void (*unregister_app)(struct ip_vs_app *inc); + void (*unregister_app)(struct net *net, struct ip_vs_app *inc); int (*app_conn_bind)(struct ip_vs_conn *cp); - void (*debug_packet)(struct ip_vs_protocol *pp, + void (*debug_packet)(int af, struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg); - void (*timeout_change)(struct ip_vs_protocol *pp, int flags); + void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); +}; - int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to); +/* + * protocol data per netns + */ +struct ip_vs_proto_data { + struct ip_vs_proto_data *next; + struct ip_vs_protocol *pp; + int *timeout_table; /* protocol timeout table */ + atomic_t appcnt; /* counter of proto app incs. */ + struct tcp_states_t *tcp_state_table; }; -extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto); +struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto); +struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net, + unsigned short proto); + +struct ip_vs_conn_param { + struct net *net; + const union nf_inet_addr *caddr; + const union nf_inet_addr *vaddr; + __be16 cport; + __be16 vport; + __u16 protocol; + u16 af; + + const struct ip_vs_pe *pe; + char *pe_data; + __u8 pe_data_len; +}; /* * IP_VS structure allocated for each dynamically scheduled connection */ struct ip_vs_conn { - struct list_head c_list; /* hashed list heads */ - + struct hlist_node c_list; /* hashed list heads */ /* Protocol, addresses and port numbers */ - __be32 caddr; /* client address */ - __be32 vaddr; /* virtual address */ - __be32 daddr; /* destination address */ - __be16 cport; - __be16 vport; - __be16 dport; + __be16 cport; + __be16 dport; + __be16 vport; + u16 af; /* address family */ + union nf_inet_addr caddr; /* client address */ + union nf_inet_addr vaddr; /* virtual address */ + union nf_inet_addr daddr; /* destination address */ + volatile __u32 flags; /* status flags */ __u16 protocol; /* Which protocol (TCP/UDP) */ +#ifdef CONFIG_NET_NS + struct net *net; /* Name space */ +#endif /* counter and timer */ atomic_t refcnt; /* reference count */ @@ -486,12 +546,13 @@ struct ip_vs_conn { /* Flags and state transition */ spinlock_t lock; /* lock for state transition */ - volatile __u16 flags; /* status flags */ volatile __u16 state; /* state info */ volatile __u16 old_state; /* old state, to be used for * state transition triggerd * synchronization */ + __u32 fwmark; /* Fire wall mark from skb */ + unsigned long sync_endtime; /* jiffies + sent_retries */ /* Control members */ struct ip_vs_conn *control; /* Master control connection */ @@ -502,9 +563,10 @@ struct ip_vs_conn { /* packet transmitter for different forwarding methods. If it mangles the packet, it must return NF_DROP or better NF_STOLEN, otherwise this must be changed to a sk_buff **. + NF_ACCEPT can be returned when destination is local. 
*/ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, - struct ip_vs_protocol *pp); + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); /* Note: we can group the following members into a structure, in order to save more space, and the following members are @@ -513,6 +575,79 @@ struct ip_vs_conn { void *app_data; /* Application private data */ struct ip_vs_seq in_seq; /* incoming seq. struct */ struct ip_vs_seq out_seq; /* outgoing seq. struct */ + + const struct ip_vs_pe *pe; + char *pe_data; + __u8 pe_data_len; + + struct rcu_head rcu_head; +}; + +/* + * To save some memory in conn table when name space is disabled. + */ +static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) +{ +#ifdef CONFIG_NET_NS + return cp->net; +#else + return &init_net; +#endif +} +static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) +{ +#ifdef CONFIG_NET_NS + cp->net = net; +#endif +} + +static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp, + struct net *net) +{ +#ifdef CONFIG_NET_NS + return cp->net == net; +#else + return 1; +#endif +} + +/* + * Extended internal versions of struct ip_vs_service_user and + * ip_vs_dest_user for IPv6 support. + * + * We need these to conveniently pass around service and destination + * options, but unfortunately, we also need to keep the old definitions to + * maintain userspace backwards compatibility for the setsockopt interface. + */ +struct ip_vs_service_user_kern { + /* virtual service addresses */ + u16 af; + u16 protocol; + union nf_inet_addr addr; /* virtual ip address */ + __be16 port; + u32 fwmark; /* firwall mark of service */ + + /* virtual service options */ + char *sched_name; + char *pe_name; + unsigned int flags; /* virtual service flags */ + unsigned int timeout; /* persistent timeout in sec */ + __be32 netmask; /* persistent netmask or plen */ +}; + + +struct ip_vs_dest_user_kern { + /* destination server address */ + union nf_inet_addr addr; + __be16 port; + + /* real server options */ + unsigned int conn_flags; /* connection flags */ + int weight; /* destination weight */ + + /* thresholds for active connections */ + u32 u_threshold; /* upper threshold */ + u32 l_threshold; /* lower threshold */ }; @@ -521,30 +656,42 @@ struct ip_vs_conn { * and the forwarding entries */ struct ip_vs_service { - struct list_head s_list; /* for normal service table */ - struct list_head f_list; /* for fwmark-based service table */ + struct hlist_node s_list; /* for normal service table */ + struct hlist_node f_list; /* for fwmark-based service table */ atomic_t refcnt; /* reference counter */ - atomic_t usecnt; /* use counter */ + u16 af; /* address family */ __u16 protocol; /* which protocol (TCP/UDP) */ - __be32 addr; /* IP address for virtual service */ + union nf_inet_addr addr; /* IP address for virtual service */ __be16 port; /* port number for the service */ __u32 fwmark; /* firewall mark of the service */ - unsigned flags; /* service status flags */ - unsigned timeout; /* persistent timeout in ticks */ - __be32 netmask; /* grouping granularity */ + unsigned int flags; /* service status flags */ + unsigned int timeout; /* persistent timeout in ticks */ + __be32 netmask; /* grouping granularity, mask/plen */ + struct net *net; struct list_head destinations; /* real server d-linked list */ __u32 num_dests; /* number of servers */ struct ip_vs_stats stats; /* statistics for the service */ - struct ip_vs_app *inc; /* bind conns to this app inc */ /* for scheduling */ - struct ip_vs_scheduler *scheduler; /* 
bound scheduler object */ - rwlock_t sched_lock; /* lock sched_data */ + struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */ + spinlock_t sched_lock; /* lock sched_data */ void *sched_data; /* scheduler application data */ + + /* alternate persistence engine */ + struct ip_vs_pe __rcu *pe; + + struct rcu_head rcu_head; }; +/* Information for cached dst */ +struct ip_vs_dest_dst { + struct dst_entry *dst_cache; /* destination cache entry */ + u32 dst_cookie; + union nf_inet_addr dst_saddr; + struct rcu_head rcu_head; +}; /* * The real server destination forwarding entry @@ -552,16 +699,18 @@ struct ip_vs_service { */ struct ip_vs_dest { struct list_head n_list; /* for the dests in the service */ - struct list_head d_list; /* for table with all the dests */ + struct hlist_node d_list; /* for table with all the dests */ - __be32 addr; /* IP address of the server */ + u16 af; /* address family */ __be16 port; /* port number of the server */ - volatile unsigned flags; /* dest status flags */ + union nf_inet_addr addr; /* IP address of the server */ + volatile unsigned int flags; /* dest status flags */ atomic_t conn_flags; /* flags to copy to conn */ atomic_t weight; /* server weight */ atomic_t refcnt; /* reference counter */ struct ip_vs_stats stats; /* statistics */ + unsigned long idle_start; /* start time, jiffies */ /* connection counters and thresholds */ atomic_t activeconns; /* active connections */ @@ -572,15 +721,17 @@ struct ip_vs_dest { /* for destination cache */ spinlock_t dst_lock; /* lock of dst_cache */ - struct dst_entry *dst_cache; /* destination cache entry */ - u32 dst_rtos; /* RT_TOS(tos) for dst */ + struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ /* for virtual service */ - struct ip_vs_service *svc; /* service it belongs to */ + struct ip_vs_service __rcu *svc; /* service it belongs to */ __u16 protocol; /* which protocol (TCP/UDP) */ - __be32 vaddr; /* virtual IP address */ __be16 vport; /* virtual port number */ + union nf_inet_addr vaddr; /* virtual IP address */ __u32 vfwmark; /* firewall mark of service */ + + struct list_head t_list; /* in dest_trash */ + unsigned int in_rs_table:1; /* we are in rs_table */ }; @@ -596,21 +747,40 @@ struct ip_vs_scheduler { /* scheduler initializing service */ int (*init_service)(struct ip_vs_service *svc); /* scheduling service finish */ - int (*done_service)(struct ip_vs_service *svc); - /* scheduler updating service */ - int (*update_service)(struct ip_vs_service *svc); + void (*done_service)(struct ip_vs_service *svc); + /* dest is linked */ + int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); + /* dest is unlinked */ + int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); + /* dest is updated */ + int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); /* selecting a server from the given service */ struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc, - const struct sk_buff *skb); + const struct sk_buff *skb, + struct ip_vs_iphdr *iph); }; +/* The persistence engine object */ +struct ip_vs_pe { + struct list_head n_list; /* d-linked list head */ + char *name; /* scheduler name */ + atomic_t refcnt; /* reference counter */ + struct module *module; /* THIS_MODULE/NULL */ + + /* get the connection template, if any */ + int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb); + bool (*ct_match)(const struct ip_vs_conn_param *p, + struct ip_vs_conn *ct); + u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval, + bool inverse); + 
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); +}; /* * The application module object (a.k.a. app incarnation) */ -struct ip_vs_app -{ +struct ip_vs_app { struct list_head a_list; /* member in app list */ int type; /* IP_VS_APP_TYPE_xxx */ char *name; /* application module name */ @@ -623,12 +793,21 @@ struct ip_vs_app struct ip_vs_app *app; /* its real application */ __be16 port; /* port number in net order */ atomic_t usecnt; /* usage counter */ + struct rcu_head rcu_head; - /* output hook: return false if can't linearize. diff set for TCP. */ + /* + * output hook: Process packet in inout direction, diff set for TCP. + * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, + * 2=Mangled but checksum was not updated + */ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *diff); - /* input hook: return false if can't linearize. diff set for TCP. */ + /* + * input hook: Process packet in outin direction, diff set for TCP. + * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, + * 2=Mangled but checksum was not updated + */ int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *diff); @@ -654,13 +833,11 @@ struct ip_vs_app struct ip_vs_conn * (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app, - const struct iphdr *iph, unsigned int proto_off, - int inverse); + const struct iphdr *iph, int inverse); struct ip_vs_conn * (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app, - const struct iphdr *iph, unsigned int proto_off, - int inverse); + const struct iphdr *iph, int inverse); int (*state_transition)(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, @@ -669,14 +846,306 @@ struct ip_vs_app void (*timeout_change)(struct ip_vs_app *app, int flags); }; +struct ipvs_master_sync_state { + struct list_head sync_queue; + struct ip_vs_sync_buff *sync_buff; + unsigned long sync_queue_len; + unsigned int sync_queue_delay; + struct task_struct *master_thread; + struct delayed_work master_wakeup_work; + struct netns_ipvs *ipvs; +}; + +/* How much time to keep dests in trash */ +#define IP_VS_DEST_TRASH_PERIOD (120 * HZ) + +/* IPVS in network namespace */ +struct netns_ipvs { + int gen; /* Generation */ + int enable; /* enable like nf_hooks do */ + /* + * Hash table: for real service lookups + */ + #define IP_VS_RTAB_BITS 4 + #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) + #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) + + struct hlist_head rs_table[IP_VS_RTAB_SIZE]; + /* ip_vs_app */ + struct list_head app_list; + /* ip_vs_proto */ + #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ + struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; + /* ip_vs_proto_tcp */ +#ifdef CONFIG_IP_VS_PROTO_TCP + #define TCP_APP_TAB_BITS 4 + #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS) + #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1) + struct list_head tcp_apps[TCP_APP_TAB_SIZE]; +#endif + /* ip_vs_proto_udp */ +#ifdef CONFIG_IP_VS_PROTO_UDP + #define UDP_APP_TAB_BITS 4 + #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS) + #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1) + struct list_head udp_apps[UDP_APP_TAB_SIZE]; +#endif + /* ip_vs_proto_sctp */ +#ifdef CONFIG_IP_VS_PROTO_SCTP + #define SCTP_APP_TAB_BITS 4 + #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS) + #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1) + /* Hash table for SCTP application incarnations */ + struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; +#endif + /* ip_vs_conn */ + atomic_t conn_count; /* 
connection counter */ + + /* ip_vs_ctl */ + struct ip_vs_stats tot_stats; /* Statistics & est. */ + + int num_services; /* no of virtual services */ + + /* Trash for destinations */ + struct list_head dest_trash; + spinlock_t dest_trash_lock; + struct timer_list dest_trash_timer; /* expiration timer */ + /* Service counters */ + atomic_t ftpsvc_counter; + atomic_t nullsvc_counter; + +#ifdef CONFIG_SYSCTL + /* 1/rate drop and drop-entry variables */ + struct delayed_work defense_work; /* Work handler */ + int drop_rate; + int drop_counter; + atomic_t dropentry; + /* locks in ctl.c */ + spinlock_t dropentry_lock; /* drop entry handling */ + spinlock_t droppacket_lock; /* drop packet handling */ + spinlock_t securetcp_lock; /* state and timeout tables */ + + /* sys-ctl struct */ + struct ctl_table_header *sysctl_hdr; + struct ctl_table *sysctl_tbl; +#endif + + /* sysctl variables */ + int sysctl_amemthresh; + int sysctl_am_droprate; + int sysctl_drop_entry; + int sysctl_drop_packet; + int sysctl_secure_tcp; +#ifdef CONFIG_IP_VS_NFCT + int sysctl_conntrack; +#endif + int sysctl_snat_reroute; + int sysctl_sync_ver; + int sysctl_sync_ports; + int sysctl_sync_persist_mode; + unsigned long sysctl_sync_qlen_max; + int sysctl_sync_sock_size; + int sysctl_cache_bypass; + int sysctl_expire_nodest_conn; + int sysctl_sloppy_tcp; + int sysctl_sloppy_sctp; + int sysctl_expire_quiescent_template; + int sysctl_sync_threshold[2]; + unsigned int sysctl_sync_refresh_period; + int sysctl_sync_retries; + int sysctl_nat_icmp_send; + int sysctl_pmtu_disc; + int sysctl_backup_only; + + /* ip_vs_lblc */ + int sysctl_lblc_expiration; + struct ctl_table_header *lblc_ctl_header; + struct ctl_table *lblc_ctl_table; + /* ip_vs_lblcr */ + int sysctl_lblcr_expiration; + struct ctl_table_header *lblcr_ctl_header; + struct ctl_table *lblcr_ctl_table; + /* ip_vs_est */ + struct list_head est_list; /* estimator list */ + spinlock_t est_lock; + struct timer_list est_timer; /* Estimation timer */ + /* ip_vs_sync */ + spinlock_t sync_lock; + struct ipvs_master_sync_state *ms; + spinlock_t sync_buff_lock; + struct task_struct **backup_threads; + int threads_mask; + int send_mesg_maxlen; + int recv_mesg_maxlen; + volatile int sync_state; + volatile int master_syncid; + volatile int backup_syncid; + struct mutex sync_mutex; + /* multicast interface name */ + char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; + char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; + /* net name space ptr */ + struct net *net; /* Needed by timer routines */ +}; + +#define DEFAULT_SYNC_THRESHOLD 3 +#define DEFAULT_SYNC_PERIOD 50 +#define DEFAULT_SYNC_VER 1 +#define DEFAULT_SLOPPY_TCP 0 +#define DEFAULT_SLOPPY_SCTP 0 +#define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ) +#define DEFAULT_SYNC_RETRIES 0 +#define IPVS_SYNC_WAKEUP_RATE 8 +#define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4) +#define IPVS_SYNC_SEND_DELAY (HZ / 50) +#define IPVS_SYNC_CHECK_PERIOD HZ +#define IPVS_SYNC_FLUSH_TIME (HZ * 2) +#define IPVS_SYNC_PORTS_MAX (1 << 6) + +#ifdef CONFIG_SYSCTL + +static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_threshold[0]; +} + +static inline int sysctl_sync_period(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]); +} + +static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period); +} + +static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_retries; +} + +static inline int 
sysctl_sync_ver(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_ver; +} + +static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sloppy_tcp; +} + +static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sloppy_sctp; +} + +static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_ports); +} + +static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_persist_mode; +} + +static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_qlen_max; +} + +static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_sock_size; +} + +static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_pmtu_disc; +} + +static inline int sysctl_backup_only(struct netns_ipvs *ipvs) +{ + return ipvs->sync_state & IP_VS_STATE_BACKUP && + ipvs->sysctl_backup_only; +} + +#else + +static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_THRESHOLD; +} + +static inline int sysctl_sync_period(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_PERIOD; +} + +static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_REFRESH_PERIOD; +} + +static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_RETRIES & 3; +} + +static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_VER; +} + +static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs) +{ + return DEFAULT_SLOPPY_TCP; +} + +static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) +{ + return DEFAULT_SLOPPY_SCTP; +} + +static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) +{ + return 1; +} + +static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) +{ + return 0; +} + +static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +{ + return IPVS_SYNC_QLEN_MAX; +} + +static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) +{ + return 0; +} + +static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) +{ + return 1; +} + +static inline int sysctl_backup_only(struct netns_ipvs *ipvs) +{ + return 0; +} + +#endif /* * IPVS core functions * (from ip_vs_core.c) */ -extern const char *ip_vs_proto_name(unsigned proto); -extern void ip_vs_init_hash_table(struct list_head *table, int rows); -#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0])) +const char *ip_vs_proto_name(unsigned int proto); +void ip_vs_init_hash_table(struct list_head *table, int rows); +#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) #define IP_VS_APP_TYPE_FTP 1 @@ -685,25 +1154,6 @@ extern void ip_vs_init_hash_table(struct list_head *table, int rows); * (from ip_vs_conn.c) */ -/* - * IPVS connection entry hash table - */ -#ifndef CONFIG_IP_VS_TAB_BITS -#define CONFIG_IP_VS_TAB_BITS 12 -#endif -/* make sure that IP_VS_CONN_TAB_BITS is located in [8, 20] */ -#if CONFIG_IP_VS_TAB_BITS < 8 -#define IP_VS_CONN_TAB_BITS 8 -#endif -#if CONFIG_IP_VS_TAB_BITS > 20 -#define IP_VS_CONN_TAB_BITS 20 -#endif -#if 8 <= CONFIG_IP_VS_TAB_BITS && CONFIG_IP_VS_TAB_BITS <= 20 -#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS -#endif -#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS) -#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1) - enum { IP_VS_DIR_INPUT = 0, IP_VS_DIR_OUTPUT, @@ -711,57 +1161,99 @@ enum { IP_VS_DIR_LAST, }; -extern 
struct ip_vs_conn *ip_vs_conn_in_get -(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); -extern struct ip_vs_conn *ip_vs_ct_in_get -(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); -extern struct ip_vs_conn *ip_vs_conn_out_get -(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); +static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol, + const union nf_inet_addr *caddr, + __be16 cport, + const union nf_inet_addr *vaddr, + __be16 vport, + struct ip_vs_conn_param *p) +{ + p->net = net; + p->af = af; + p->protocol = protocol; + p->caddr = caddr; + p->cport = cport; + p->vaddr = vaddr; + p->vport = vport; + p->pe = NULL; + p->pe_data = NULL; +} + +struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p); +struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); + +struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, + const struct ip_vs_iphdr *iph, + int inverse); + +struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); + +struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, + const struct ip_vs_iphdr *iph, + int inverse); + +/* Get reference to gain full access to conn. + * By default, RCU read-side critical sections have access only to + * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference. + */ +static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp) +{ + return atomic_inc_not_zero(&cp->refcnt); +} /* put back the conn without restarting its timer */ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) { + smp_mb__before_atomic(); atomic_dec(&cp->refcnt); } -extern void ip_vs_conn_put(struct ip_vs_conn *cp); -extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); +void ip_vs_conn_put(struct ip_vs_conn *cp); +void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); -extern struct ip_vs_conn * -ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, - __be32 daddr, __be16 dport, unsigned flags, - struct ip_vs_dest *dest); -extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); +struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, + const union nf_inet_addr *daddr, + __be16 dport, unsigned int flags, + struct ip_vs_dest *dest, __u32 fwmark); +void ip_vs_conn_expire_now(struct ip_vs_conn *cp); -extern const char * ip_vs_state_name(__u16 proto, int state); +const char *ip_vs_state_name(__u16 proto, int state); -extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); -extern int ip_vs_check_template(struct ip_vs_conn *ct); -extern void ip_vs_random_dropentry(void); -extern int ip_vs_conn_init(void); -extern void ip_vs_conn_cleanup(void); +void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp); +int ip_vs_check_template(struct ip_vs_conn *ct); +void ip_vs_random_dropentry(struct net *net); +int ip_vs_conn_init(void); +void ip_vs_conn_cleanup(void); static inline void ip_vs_control_del(struct ip_vs_conn *cp) { struct ip_vs_conn *ctl_cp = cp->control; if (!ctl_cp) { - IP_VS_ERR("request control DEL for uncontrolled: " - "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", - NIPQUAD(cp->caddr),ntohs(cp->cport), - NIPQUAD(cp->vaddr),ntohs(cp->vport)); + IP_VS_ERR_BUF("request control DEL for uncontrolled: " + "%s:%d to %s:%d\n", + IP_VS_DBG_ADDR(cp->af, &cp->caddr), + ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &cp->vaddr), + ntohs(cp->vport)); + return; } - IP_VS_DBG(7, "DELeting control for: " - 
"cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n", - NIPQUAD(cp->caddr),ntohs(cp->cport), - NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport)); + IP_VS_DBG_BUF(7, "DELeting control for: " + "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", + IP_VS_DBG_ADDR(cp->af, &cp->caddr), + ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), + ntohs(ctl_cp->cport)); cp->control = NULL; if (atomic_read(&ctl_cp->n_control) == 0) { - IP_VS_ERR("BUG control DEL with n=0 : " - "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", - NIPQUAD(cp->caddr),ntohs(cp->cport), - NIPQUAD(cp->vaddr),ntohs(cp->vport)); + IP_VS_ERR_BUF("BUG control DEL with n=0 : " + "%s:%d to %s:%d\n", + IP_VS_DBG_ADDR(cp->af, &cp->caddr), + ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &cp->vaddr), + ntohs(cp->vport)); + return; } atomic_dec(&ctl_cp->n_control); @@ -771,167 +1263,229 @@ static inline void ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) { if (cp->control) { - IP_VS_ERR("request control ADD for already controlled: " - "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", - NIPQUAD(cp->caddr),ntohs(cp->cport), - NIPQUAD(cp->vaddr),ntohs(cp->vport)); + IP_VS_ERR_BUF("request control ADD for already controlled: " + "%s:%d to %s:%d\n", + IP_VS_DBG_ADDR(cp->af, &cp->caddr), + ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &cp->vaddr), + ntohs(cp->vport)); + ip_vs_control_del(cp); } - IP_VS_DBG(7, "ADDing control for: " - "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n", - NIPQUAD(cp->caddr),ntohs(cp->cport), - NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport)); + IP_VS_DBG_BUF(7, "ADDing control for: " + "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", + IP_VS_DBG_ADDR(cp->af, &cp->caddr), + ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), + ntohs(ctl_cp->cport)); cp->control = ctl_cp; atomic_inc(&ctl_cp->n_control); } +/* + * IPVS netns init & cleanup functions + */ +int ip_vs_estimator_net_init(struct net *net); +int ip_vs_control_net_init(struct net *net); +int ip_vs_protocol_net_init(struct net *net); +int ip_vs_app_net_init(struct net *net); +int ip_vs_conn_net_init(struct net *net); +int ip_vs_sync_net_init(struct net *net); +void ip_vs_conn_net_cleanup(struct net *net); +void ip_vs_app_net_cleanup(struct net *net); +void ip_vs_protocol_net_cleanup(struct net *net); +void ip_vs_control_net_cleanup(struct net *net); +void ip_vs_estimator_net_cleanup(struct net *net); +void ip_vs_sync_net_cleanup(struct net *net); +void ip_vs_service_net_cleanup(struct net *net); /* * IPVS application functions * (from ip_vs_app.c) */ #define IP_VS_APP_MAX_PORTS 8 -extern int register_ip_vs_app(struct ip_vs_app *app); -extern void unregister_ip_vs_app(struct ip_vs_app *app); -extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); -extern void ip_vs_unbind_app(struct ip_vs_conn *cp); -extern int -register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port); -extern int ip_vs_app_inc_get(struct ip_vs_app *inc); -extern void ip_vs_app_inc_put(struct ip_vs_app *inc); +struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); +void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app); +int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); +void ip_vs_unbind_app(struct ip_vs_conn *cp); +int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto, + __u16 port); +int ip_vs_app_inc_get(struct ip_vs_app *inc); +void ip_vs_app_inc_put(struct ip_vs_app *inc); + +int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb); +int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb); 
+ +int register_ip_vs_pe(struct ip_vs_pe *pe); +int unregister_ip_vs_pe(struct ip_vs_pe *pe); +struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); +struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); -extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb); -extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb); -extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, - char *o_buf, int o_len, char *n_buf, int n_len); -extern int ip_vs_app_init(void); -extern void ip_vs_app_cleanup(void); +/* + * Use a #define to avoid all of module.h just for these trivial ops + */ +#define ip_vs_pe_get(pe) \ + if (pe && pe->module) \ + __module_get(pe->module); +#define ip_vs_pe_put(pe) \ + if (pe && pe->module) \ + module_put(pe->module); /* * IPVS protocol functions (from ip_vs_proto.c) */ -extern int ip_vs_protocol_init(void); -extern void ip_vs_protocol_cleanup(void); -extern void ip_vs_protocol_timeout_change(int flags); -extern int *ip_vs_create_timeout_table(int *table, int size); -extern int -ip_vs_set_state_timeout(int *table, int num, char **names, char *name, int to); -extern void -ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, - int offset, const char *msg); +int ip_vs_protocol_init(void); +void ip_vs_protocol_cleanup(void); +void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); +int *ip_vs_create_timeout_table(int *table, int size); +int ip_vs_set_state_timeout(int *table, int num, const char *const *names, + const char *name, int to); +void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, + const struct sk_buff *skb, int offset, + const char *msg); extern struct ip_vs_protocol ip_vs_protocol_tcp; extern struct ip_vs_protocol ip_vs_protocol_udp; extern struct ip_vs_protocol ip_vs_protocol_icmp; extern struct ip_vs_protocol ip_vs_protocol_esp; extern struct ip_vs_protocol ip_vs_protocol_ah; - +extern struct ip_vs_protocol ip_vs_protocol_sctp; /* * Registering/unregistering scheduler functions * (from ip_vs_sched.c) */ -extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); -extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); -extern int ip_vs_bind_scheduler(struct ip_vs_service *svc, - struct ip_vs_scheduler *scheduler); -extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc); -extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); -extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); -extern struct ip_vs_conn * -ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb); -extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, - struct ip_vs_protocol *pp); +int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); +int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); +int ip_vs_bind_scheduler(struct ip_vs_service *svc, + struct ip_vs_scheduler *scheduler); +void ip_vs_unbind_scheduler(struct ip_vs_service *svc, + struct ip_vs_scheduler *sched); +struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); +void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); +struct ip_vs_conn * +ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, + struct ip_vs_proto_data *pd, int *ignored, + struct ip_vs_iphdr *iph); +int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, + struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph); + +void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); /* * IPVS control data and functions 
(from ip_vs_ctl.c) */ -extern int sysctl_ip_vs_cache_bypass; -extern int sysctl_ip_vs_expire_nodest_conn; -extern int sysctl_ip_vs_expire_quiescent_template; -extern int sysctl_ip_vs_sync_threshold[2]; -extern int sysctl_ip_vs_nat_icmp_send; extern struct ip_vs_stats ip_vs_stats; -extern struct ctl_path net_vs_ctl_path[]; - -extern struct ip_vs_service * -ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); - -static inline void ip_vs_service_put(struct ip_vs_service *svc) +extern int sysctl_ip_vs_sync_ver; + +struct ip_vs_service * +ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol, + const union nf_inet_addr *vaddr, __be16 vport); + +bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol, + const union nf_inet_addr *daddr, __be16 dport); + +int ip_vs_use_count_inc(void); +void ip_vs_use_count_dec(void); +int ip_vs_register_nl_ioctl(void); +void ip_vs_unregister_nl_ioctl(void); +int ip_vs_control_init(void); +void ip_vs_control_cleanup(void); +struct ip_vs_dest * +ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, + __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, + __u16 protocol, __u32 fwmark, __u32 flags); +void ip_vs_try_bind_dest(struct ip_vs_conn *cp); + +static inline void ip_vs_dest_hold(struct ip_vs_dest *dest) { - atomic_dec(&svc->usecnt); + atomic_inc(&dest->refcnt); } -extern struct ip_vs_dest * -ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport); -extern int ip_vs_use_count_inc(void); -extern void ip_vs_use_count_dec(void); -extern int ip_vs_control_init(void); -extern void ip_vs_control_cleanup(void); -extern struct ip_vs_dest * -ip_vs_find_dest(__be32 daddr, __be16 dport, - __be32 vaddr, __be16 vport, __u16 protocol); -extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); +static inline void ip_vs_dest_put(struct ip_vs_dest *dest) +{ + smp_mb__before_atomic(); + atomic_dec(&dest->refcnt); +} +static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) +{ + if (atomic_dec_return(&dest->refcnt) < 0) + kfree(dest); +} /* * IPVS sync daemon data and function prototypes * (from ip_vs_sync.c) */ -extern volatile int ip_vs_sync_state; -extern volatile int ip_vs_master_syncid; -extern volatile int ip_vs_backup_syncid; -extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; -extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; -extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid); -extern int stop_sync_thread(int state); -extern void ip_vs_sync_conn(struct ip_vs_conn *cp); - +int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); +int stop_sync_thread(struct net *net, int state); +void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); /* * IPVS rate estimator prototypes (from ip_vs_est.c) */ -extern int ip_vs_new_estimator(struct ip_vs_stats *stats); -extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); -extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); +void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); +void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); +void ip_vs_zero_estimator(struct ip_vs_stats *stats); +void ip_vs_read_estimator(struct ip_vs_stats_user *dst, + struct ip_vs_stats *stats); /* * Various IPVS packet transmitters (from ip_vs_xmit.c) */ -extern int ip_vs_null_xmit -(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); -extern int ip_vs_bypass_xmit -(struct sk_buff *skb, struct ip_vs_conn 
*cp, struct ip_vs_protocol *pp); -extern int ip_vs_nat_xmit -(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); -extern int ip_vs_tunnel_xmit -(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); -extern int ip_vs_dr_xmit -(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); -extern int ip_vs_icmp_xmit -(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset); -extern void ip_vs_dst_reset(struct ip_vs_dest *dest); - +int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, int offset, + unsigned int hooknum, struct ip_vs_iphdr *iph); +void ip_vs_dest_dst_rcu_free(struct rcu_head *head); + +#ifdef CONFIG_IP_VS_IPV6 +int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); +int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, int offset, + unsigned int hooknum, struct ip_vs_iphdr *iph); +#endif +#ifdef CONFIG_SYSCTL /* * This is a simple mechanism to ignore packets when * we are loaded. 
Just set ip_vs_drop_rate to 'n' and * we start to drop 1/rate of the packets */ -extern int ip_vs_drop_rate; -extern int ip_vs_drop_counter; -static __inline__ int ip_vs_todrop(void) +static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { - if (!ip_vs_drop_rate) return 0; - if (--ip_vs_drop_counter > 0) return 0; - ip_vs_drop_counter = ip_vs_drop_rate; + if (!ipvs->drop_rate) + return 0; + if (--ipvs->drop_counter > 0) + return 0; + ipvs->drop_counter = ipvs->drop_rate; return 1; } +#else +static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } +#endif /* * ip_vs_fwd_tag returns the forwarding tag of the connection @@ -959,25 +1513,116 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) return fwd; } -extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, - struct ip_vs_conn *cp, int dir); +void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, + struct ip_vs_conn *cp, int dir); + +#ifdef CONFIG_IP_VS_IPV6 +void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, + struct ip_vs_conn *cp, int dir); +#endif -extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset); +__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset); static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum) { __be32 diff[2] = { ~old, new }; - return csum_partial((char *) diff, sizeof(diff), oldsum); + return csum_partial(diff, sizeof(diff), oldsum); +} + +#ifdef CONFIG_IP_VS_IPV6 +static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new, + __wsum oldsum) +{ + __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0], + new[3], new[2], new[1], new[0] }; + + return csum_partial(diff, sizeof(diff), oldsum); } +#endif static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) { __be16 diff[2] = { ~old, new }; - return csum_partial((char *) diff, sizeof(diff), oldsum); + return csum_partial(diff, sizeof(diff), oldsum); +} + +/* + * Forget current conntrack (unconfirmed) and attach notrack entry + */ +static inline void ip_vs_notrack(struct sk_buff *skb) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + + if (!ct || !nf_ct_is_untracked(ct)) { + nf_conntrack_put(skb->nfct); + skb->nfct = &nf_ct_untracked_get()->ct_general; + skb->nfctinfo = IP_CT_NEW; + nf_conntrack_get(skb->nfct); + } +#endif } -#endif /* __KERNEL__ */ +#ifdef CONFIG_IP_VS_NFCT +/* + * Netfilter connection tracking + * (from ip_vs_nfct.c) + */ +static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) +{ +#ifdef CONFIG_SYSCTL + return ipvs->sysctl_conntrack; +#else + return 0; +#endif +} + +void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, + int outin); +int ip_vs_confirm_conntrack(struct sk_buff *skb); +void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, + struct ip_vs_conn *cp, u_int8_t proto, + const __be16 port, int from_rs); +void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp); + +#else + +static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) +{ + return 0; +} + +static inline void ip_vs_update_conntrack(struct sk_buff *skb, + struct ip_vs_conn *cp, int outin) +{ +} + +static inline int ip_vs_confirm_conntrack(struct sk_buff *skb) +{ + return NF_ACCEPT; +} + +static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) +{ +} +/* CONFIG_IP_VS_NFCT */ +#endif + +static inline int +ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 
+{ + /* + * We think the overhead of processing active connections is 256 + * times higher than that of inactive connections in average. (This + * 256 times might not be accurate, we will change it later) We + * use the following formula to estimate the overhead now: + * dest->activeconns*256 + dest->inactconns + */ + return (atomic_read(&dest->activeconns) << 8) + + atomic_read(&dest->inactconns); +} -#endif /* _IP_VS_H */ +#endif /* _NET_IP_VS_H */ diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h index 330b74e813a..cc4f30cd731 100644 --- a/include/net/ipcomp.h +++ b/include/net/ipcomp.h @@ -9,11 +9,17 @@ struct crypto_comp; struct ipcomp_data { u16 threshold; - struct crypto_comp **tfms; + struct crypto_comp * __percpu *tfms; }; struct ip_comp_hdr; struct sk_buff; +struct xfrm_state; + +int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb); +int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb); +void ipcomp_destroy(struct xfrm_state *x); +int ipcomp_init_state(struct xfrm_state *x); static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb) { diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h index 3924d7d2cb1..c74cc1bd5a0 100644 --- a/include/net/ipconfig.h +++ b/include/net/ipconfig.h @@ -1,6 +1,4 @@ /* - * $Id: ipconfig.h,v 1.4 2001/04/30 04:51:46 davem Exp $ - * * Copyright (C) 1997 Martin Mares * * Automatic IP Layer Configuration diff --git a/include/net/ipip.h b/include/net/ipip.h deleted file mode 100644 index 549e132bca9..00000000000 --- a/include/net/ipip.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef __NET_IPIP_H -#define __NET_IPIP_H 1 - -#include <linux/if_tunnel.h> -#include <net/ip.h> - -/* Keep error state on tunnel for 30 sec */ -#define IPTUNNEL_ERR_TIMEO (30*HZ) - -struct ip_tunnel -{ - struct ip_tunnel *next; - struct net_device *dev; - struct net_device_stats stat; - - int recursion; /* Depth of hard_start_xmit recursion */ - int err_count; /* Number of arrived ICMP errors */ - unsigned long err_time; /* Time when the last ICMP error arrived */ - - /* These four fields used only by GRE */ - __u32 i_seqno; /* The last seen seqno */ - __u32 o_seqno; /* The last output seqno */ - int hlen; /* Precalculated GRE header length */ - int mlink; - - struct ip_tunnel_parm parms; -}; - -#define IPTUNNEL_XMIT() do { \ - int err; \ - int pkt_len = skb->len; \ - \ - skb->ip_summed = CHECKSUM_NONE; \ - ip_select_ident(iph, &rt->u.dst, NULL); \ - \ - err = ip_local_out(skb); \ - if (net_xmit_eval(err) == 0) { \ - stats->tx_bytes += pkt_len; \ - stats->tx_packets++; \ - } else { \ - stats->tx_errors++; \ - stats->tx_aborted_errors++; \ - } \ -} while (0) - -#endif diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c0c019f72ba..574337fe72d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -4,8 +4,6 @@ * Authors: * Pedro Roque <roque@di.fc.ul.pt> * - * $Id: ipv6.h,v 1.1 2002/05/20 15:13:07 jgrimm Exp $ - * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version @@ -17,6 +15,7 @@ #include <linux/ipv6.h> #include <linux/hardirq.h> +#include <linux/jhash.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/flow.h> @@ -36,11 +35,13 @@ #define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */ #define NEXTHDR_ROUTING 43 /* Routing header. */ #define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */ +#define NEXTHDR_GRE 47 /* GRE header. */ #define NEXTHDR_ESP 50 /* Encapsulating security payload. 
*/ #define NEXTHDR_AUTH 51 /* Authentication header. */ #define NEXTHDR_ICMP 58 /* ICMP for IPv6. */ #define NEXTHDR_NONE 59 /* No next header */ #define NEXTHDR_DEST 60 /* Destination options header. */ +#define NEXTHDR_SCTP 132 /* SCTP message. */ #define NEXTHDR_MOBILITY 135 /* Mobility header. */ #define NEXTHDR_MAX 255 @@ -75,16 +76,13 @@ #define IPV6_ADDR_SCOPE_MASK 0x00f0U #define IPV6_ADDR_MAPPED 0x1000U -#define IPV6_ADDR_RESERVED 0x2000U /* reserved address space */ /* * Addr scopes */ -#ifdef __KERNEL__ #define IPV6_ADDR_MC_SCOPE(a) \ ((a)->s6_addr[1] & 0x0f) /* nonstandard */ #define __IPV6_ADDR_SCOPE_INVALID -1 -#endif #define IPV6_ADDR_SCOPE_NODELOCAL 0x01 #define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 #define IPV6_ADDR_SCOPE_SITELOCAL 0x05 @@ -92,6 +90,16 @@ #define IPV6_ADDR_SCOPE_GLOBAL 0x0e /* + * Addr flags + */ +#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ + ((a)->s6_addr[1] & 0x10) +#define IPV6_ADDR_MC_FLAG_PREFIX(a) \ + ((a)->s6_addr[1] & 0x20) +#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ + ((a)->s6_addr[1] & 0x40) + +/* * fragmentation header */ @@ -102,62 +110,86 @@ struct frag_hdr { __be32 identification; }; -#define IP6_MF 0x0001 +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 -#ifdef __KERNEL__ +#define IP6_REPLY_MARK(net, mark) \ + ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0) #include <net/sock.h> /* sysctls */ extern int sysctl_mld_max_msf; -extern struct ctl_path net_ipv6_ctl_path[]; -#define _DEVINC(statname, modifier, idev, field) \ +#define _DEVINC(net, statname, modifier, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \ - SNMP_INC_STATS##modifier(statname##_statistics, (field)); \ + SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ +}) + +/* per device counters are atomic_long_t */ +#define _DEVINCATOMIC(net, statname, modifier, idev, field) \ +({ \ + struct inet6_dev *_idev = (idev); \ + if (likely(_idev != NULL)) \ + SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ + SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ }) -#define _DEVADD(statname, modifier, idev, field, val) \ +/* per device and per net counters are atomic_long_t */ +#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \ +({ \ + struct inet6_dev *_idev = (idev); \ + if (likely(_idev != NULL)) \ + SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ + SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\ +}) + +#define _DEVADD(net, statname, modifier, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \ - SNMP_ADD_STATS##modifier(statname##_statistics, (field), (val));\ + SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\ +}) + +#define _DEVUPD(net, statname, modifier, idev, field, val) \ +({ \ + struct inet6_dev *_idev = (idev); \ + if (likely(_idev != NULL)) \ + SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \ + SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\ }) /* MIBs */ -DECLARE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); - -#define IP6_INC_STATS(idev,field) _DEVINC(ipv6, , idev, field) -#define IP6_INC_STATS_BH(idev,field) _DEVINC(ipv6, _BH, idev, field) -#define IP6_INC_STATS_USER(idev,field) _DEVINC(ipv6, _USER, idev, field) -#define IP6_ADD_STATS_BH(idev,field,val) _DEVADD(ipv6, _BH, idev, field, val) - 
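
The IPV6_ADDR_MC_FLAG_* helpers added above simply test the flag bits carried in the second byte of a multicast address (transient, prefix-based and rendezvous-point), next to the existing scope nibble. A small user-space check of that byte, using stand-in macro names rather than the kernel ones:

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define MC_SCOPE(a)           ((a)->s6_addr[1] & 0x0f)
#define MC_FLAG_TRANSIENT(a)  ((a)->s6_addr[1] & 0x10)
#define MC_FLAG_PREFIX(a)     ((a)->s6_addr[1] & 0x20)
#define MC_FLAG_RENDEZVOUS(a) ((a)->s6_addr[1] & 0x40)

int main(void)
{
        struct in6_addr a;

        /* ff32::1 - transient, prefix-based, link-local scope */
        if (inet_pton(AF_INET6, "ff32::1", &a) != 1)
                return 1;
        printf("scope=%#x transient=%d prefix=%d rendezvous=%d\n",
               MC_SCOPE(&a), !!MC_FLAG_TRANSIENT(&a),
               !!MC_FLAG_PREFIX(&a), !!MC_FLAG_RENDEZVOUS(&a));
        return 0;
}
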
-DECLARE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); -DECLARE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics); - -#define ICMP6_INC_STATS(idev, field) _DEVINC(icmpv6, , idev, field) -#define ICMP6_INC_STATS_BH(idev, field) _DEVINC(icmpv6, _BH, idev, field) -#define ICMP6_INC_STATS_USER(idev, field) _DEVINC(icmpv6, _USER, idev, field) - -#define ICMP6MSGOUT_INC_STATS(idev, field) \ - _DEVINC(icmpv6msg, , idev, field +256) -#define ICMP6MSGOUT_INC_STATS_BH(idev, field) \ - _DEVINC(icmpv6msg, _BH, idev, field +256) -#define ICMP6MSGOUT_INC_STATS_USER(idev, field) \ - _DEVINC(icmpv6msg, _USER, idev, field +256) -#define ICMP6MSGIN_INC_STATS(idev, field) \ - _DEVINC(icmpv6msg, , idev, field) -#define ICMP6MSGIN_INC_STATS_BH(idev, field) \ - _DEVINC(icmpv6msg, _BH, idev, field) -#define ICMP6MSGIN_INC_STATS_USER(idev, field) \ - _DEVINC(icmpv6msg, _USER, idev, field) - -struct ip6_ra_chain -{ + +#define IP6_INC_STATS(net, idev,field) \ + _DEVINC(net, ipv6, 64, idev, field) +#define IP6_INC_STATS_BH(net, idev,field) \ + _DEVINC(net, ipv6, 64_BH, idev, field) +#define IP6_ADD_STATS(net, idev,field,val) \ + _DEVADD(net, ipv6, 64, idev, field, val) +#define IP6_ADD_STATS_BH(net, idev,field,val) \ + _DEVADD(net, ipv6, 64_BH, idev, field, val) +#define IP6_UPD_PO_STATS(net, idev,field,val) \ + _DEVUPD(net, ipv6, 64, idev, field, val) +#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \ + _DEVUPD(net, ipv6, 64_BH, idev, field, val) +#define ICMP6_INC_STATS(net, idev, field) \ + _DEVINCATOMIC(net, icmpv6, , idev, field) +#define ICMP6_INC_STATS_BH(net, idev, field) \ + _DEVINCATOMIC(net, icmpv6, _BH, idev, field) + +#define ICMP6MSGOUT_INC_STATS(net, idev, field) \ + _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) +#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \ + _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) +#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \ + _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field) + +struct ip6_ra_chain { struct ip6_ra_chain *next; struct sock *sk; int sel; @@ -172,8 +204,7 @@ extern rwlock_t ip6_ra_lock; ancillary data and passed to IPv6. */ -struct ipv6_txoptions -{ +struct ipv6_txoptions { /* Length of this structure */ int tot_len; @@ -190,37 +221,45 @@ struct ipv6_txoptions /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. 
*/ }; -struct ip6_flowlabel -{ - struct ip6_flowlabel *next; +struct ip6_flowlabel { + struct ip6_flowlabel __rcu *next; __be32 label; atomic_t users; struct in6_addr dst; struct ipv6_txoptions *opt; unsigned long linger; + struct rcu_head rcu; u8 share; - u32 owner; + union { + struct pid *pid; + kuid_t uid; + } owner; unsigned long lastuse; unsigned long expires; + struct net *fl_net; }; -#define IPV6_FLOWINFO_MASK __constant_htonl(0x0FFFFFFF) -#define IPV6_FLOWLABEL_MASK __constant_htonl(0x000FFFFF) +#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) +#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) +#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) +#define IPV6_TCLASS_SHIFT 20 -struct ipv6_fl_socklist -{ - struct ipv6_fl_socklist *next; - struct ip6_flowlabel *fl; +struct ipv6_fl_socklist { + struct ipv6_fl_socklist __rcu *next; + struct ip6_flowlabel *fl; + struct rcu_head rcu; }; -extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); -extern struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space, - struct ip6_flowlabel * fl, - struct ipv6_txoptions * fopt); -extern void fl6_free_socklist(struct sock *sk); -extern int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen); -extern int ip6_flowlabel_init(void); -extern void ip6_flowlabel_cleanup(void); +struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); +struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, + struct ip6_flowlabel *fl, + struct ipv6_txoptions *fopt); +void fl6_free_socklist(struct sock *sk); +int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen); +int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, + int flags); +int ip6_flowlabel_init(void); +void ip6_flowlabel_cleanup(void); static inline void fl6_sock_release(struct ip6_flowlabel *fl) { @@ -228,37 +267,53 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl) atomic_dec(&fl->users); } -extern int ip6_ra_control(struct sock *sk, int sel, - void (*destructor)(struct sock *)); +void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info); +int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, + struct icmp6hdr *thdr, int len); -extern int ipv6_parse_hopopts(struct sk_buff *skb); +int ip6_ra_control(struct sock *sk, int sel); -extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt); -extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, - int newtype, - struct ipv6_opt_hdr __user *newopt, - int newoptlen); +int ipv6_parse_hopopts(struct sk_buff *skb); + +struct ipv6_txoptions *ipv6_dup_options(struct sock *sk, + struct ipv6_txoptions *opt); +struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, + struct ipv6_txoptions *opt, + int newtype, + struct ipv6_opt_hdr __user *newopt, + int newoptlen); struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); -extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb); +bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); -int ip6_frag_nqueues(struct net *net); -int ip6_frag_mem(struct net *net); +static inline bool ipv6_accept_ra(struct inet6_dev *idev) +{ + /* If forwarding is enabled, RA are not accepted unless the special + * hybrid mode (accept_ra=2) is enabled. + */ + return idev->cnf.forwarding ? 
idev->cnf.accept_ra == 2 : + idev->cnf.accept_ra; +} -#define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */ +#if IS_ENABLED(CONFIG_IPV6) +static inline int ip6_frag_nqueues(struct net *net) +{ + return net->ipv6.frags.nqueues; +} -/* - * Function prototype for build_xmit - */ +static inline int ip6_frag_mem(struct net *net) +{ + return sum_frag_mem_limit(&net->ipv6.frags); +} +#endif -typedef int (*inet_getfrag_t) (const void *data, - struct in6_addr *addr, - char *, - unsigned int, unsigned int); +#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */ +#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */ +#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */ -extern int __ipv6_addr_type(const struct in6_addr *addr); +int __ipv6_addr_type(const struct in6_addr *addr); static inline int ipv6_addr_type(const struct in6_addr *addr) { return __ipv6_addr_type(addr) & 0xffff; @@ -271,7 +326,7 @@ static inline int ipv6_addr_scope(const struct in6_addr *addr) static inline int __ipv6_addr_src_scope(int type) { - return (type == IPV6_ADDR_ANY ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16)); + return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16); } static inline int ipv6_addr_src_scope(const struct in6_addr *addr) @@ -279,26 +334,40 @@ static inline int ipv6_addr_src_scope(const struct in6_addr *addr) return __ipv6_addr_src_scope(__ipv6_addr_type(addr)); } +static inline bool __ipv6_addr_needs_scope_id(int type) +{ + return type & IPV6_ADDR_LINKLOCAL || + (type & IPV6_ADDR_MULTICAST && + (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL))); +} + +static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface) +{ + return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0; +} + static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2) { return memcmp(a1, a2, sizeof(struct in6_addr)); } -static inline int +static inline bool ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, const struct in6_addr *a2) { - unsigned int i; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + const unsigned long *ul1 = (const unsigned long *)a1; + const unsigned long *ulm = (const unsigned long *)m; + const unsigned long *ul2 = (const unsigned long *)a2; - for (i = 0; i < 4; i++) - if ((a1->s6_addr32[i] ^ a2->s6_addr32[i]) & m->s6_addr32[i]) - return 1; - return 0; -} - -static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) -{ - memcpy(a1, a2, sizeof(struct in6_addr)); + return !!(((ul1[0] ^ ul2[0]) & ulm[0]) | + ((ul1[1] ^ ul2[1]) & ulm[1])); +#else + return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | + ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | + ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | + ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); +#endif } static inline void ipv6_addr_prefix(struct in6_addr *pfx, @@ -315,79 +384,227 @@ static inline void ipv6_addr_prefix(struct in6_addr *pfx, pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b); } +static inline void __ipv6_addr_set_half(__be32 *addr, + __be32 wh, __be32 wl) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 +#if defined(__BIG_ENDIAN) + if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) { + *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl)); + return; + } +#elif defined(__LITTLE_ENDIAN) + if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) { + *(__force u64 *)addr = ((__force u64)(wl) << 32 
| (__force u64)(wh)); + return; + } +#endif +#endif + addr[0] = wh; + addr[1] = wl; +} + static inline void ipv6_addr_set(struct in6_addr *addr, __be32 w1, __be32 w2, __be32 w3, __be32 w4) { - addr->s6_addr32[0] = w1; - addr->s6_addr32[1] = w2; - addr->s6_addr32[2] = w3; - addr->s6_addr32[3] = w4; + __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2); + __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4); +} + +static inline bool ipv6_addr_equal(const struct in6_addr *a1, + const struct in6_addr *a2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + const unsigned long *ul1 = (const unsigned long *)a1; + const unsigned long *ul2 = (const unsigned long *)a2; + + return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL; +#else + return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | + (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | + (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | + (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; +#endif } -static inline int ipv6_addr_equal(const struct in6_addr *a1, - const struct in6_addr *a2) +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 +static inline bool __ipv6_prefix_equal64_half(const __be64 *a1, + const __be64 *a2, + unsigned int len) { - return (a1->s6_addr32[0] == a2->s6_addr32[0] && - a1->s6_addr32[1] == a2->s6_addr32[1] && - a1->s6_addr32[2] == a2->s6_addr32[2] && - a1->s6_addr32[3] == a2->s6_addr32[3]); + if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len)))) + return false; + return true; } -static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, - unsigned int prefixlen) +static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, + const struct in6_addr *addr2, + unsigned int prefixlen) { - unsigned pdw, pbi; + const __be64 *a1 = (const __be64 *)addr1; + const __be64 *a2 = (const __be64 *)addr2; + + if (prefixlen >= 64) { + if (a1[0] ^ a2[0]) + return false; + return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64); + } + return __ipv6_prefix_equal64_half(a1, a2, prefixlen); +} +#else +static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, + const struct in6_addr *addr2, + unsigned int prefixlen) +{ + const __be32 *a1 = addr1->s6_addr32; + const __be32 *a2 = addr2->s6_addr32; + unsigned int pdw, pbi; /* check complete u32 in prefix */ pdw = prefixlen >> 5; if (pdw && memcmp(a1, a2, pdw << 2)) - return 0; + return false; /* check incomplete u32 in prefix */ pbi = prefixlen & 0x1f; if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) - return 0; + return false; - return 1; -} - -static inline int ipv6_prefix_equal(const struct in6_addr *a1, - const struct in6_addr *a2, - unsigned int prefixlen) -{ - return __ipv6_prefix_equal(a1->s6_addr32, a2->s6_addr32, - prefixlen); + return true; } +#endif struct inet_frag_queue; +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER, + IP6_DEFRAG_CONNTRACK_IN, + __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX, + IP6_DEFRAG_CONNTRACK_OUT, + __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, +}; + struct ip6_create_arg { __be32 id; - struct in6_addr *src; - struct in6_addr *dst; + u32 user; + const struct in6_addr *src; + const struct in6_addr *dst; + u8 ecn; }; void ip6_frag_init(struct inet_frag_queue *q, void *a); -int ip6_frag_match(struct inet_frag_queue *q, void *a); +bool ip6_frag_match(struct inet_frag_queue *q, void *a); + +/* + * Equivalent of ipv4 struct ip 
+ */ +struct frag_queue { + struct inet_frag_queue q; + + __be32 id; /* fragment id */ + u32 user; + struct in6_addr saddr; + struct in6_addr daddr; + + int iif; + unsigned int csum; + __u16 nhoffset; + u8 ecn; +}; + +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, + struct inet_frags *frags); + +static inline bool ipv6_addr_any(const struct in6_addr *a) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + const unsigned long *ul = (const unsigned long *)a; + + return (ul[0] | ul[1]) == 0UL; +#else + return (a->s6_addr32[0] | a->s6_addr32[1] | + a->s6_addr32[2] | a->s6_addr32[3]) == 0; +#endif +} + +static inline u32 ipv6_addr_hash(const struct in6_addr *a) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + const unsigned long *ul = (const unsigned long *)a; + unsigned long x = ul[0] ^ ul[1]; + + return (u32)(x ^ (x >> 32)); +#else + return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^ + a->s6_addr32[2] ^ a->s6_addr32[3]); +#endif +} + +/* more secured version of ipv6_addr_hash() */ +static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) +{ + u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; + + return jhash_3words(v, + (__force u32)a->s6_addr32[2], + (__force u32)a->s6_addr32[3], + initval); +} + +static inline bool ipv6_addr_loopback(const struct in6_addr *a) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + const unsigned long *ul = (const unsigned long *)a; + + return (ul[0] | (ul[1] ^ cpu_to_be64(1))) == 0UL; +#else + return (a->s6_addr32[0] | a->s6_addr32[1] | + a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0; +#endif +} + +static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) +{ + return ( +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + *(__be64 *)a | +#else + (a->s6_addr32[0] | a->s6_addr32[1]) | +#endif + (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL; +} + +/* + * Check for a RFC 4843 ORCHID address + * (Overlay Routable Cryptographic Hash Identifiers) + */ +static inline bool ipv6_addr_orchid(const struct in6_addr *a) +{ + return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); +} -static inline int ipv6_addr_any(const struct in6_addr *a) +static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr) { - return ((a->s6_addr32[0] | a->s6_addr32[1] | - a->s6_addr32[2] | a->s6_addr32[3] ) == 0); + return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000); } -static inline int ipv6_addr_v4mapped(const struct in6_addr *a) +static inline void ipv6_addr_set_v4mapped(const __be32 addr, + struct in6_addr *v4mapped) { - return ((a->s6_addr32[0] | a->s6_addr32[1]) == 0 && - a->s6_addr32[2] == htonl(0x0000ffff)); + ipv6_addr_set(v4mapped, + 0, 0, + htonl(0x0000FFFF), + addr); } /* * find the first different bit between two addresses * length of address must be a multiple of 32bits */ -static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen) +static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen) { const __be32 *a1 = token1, *a2 = token2; int i; @@ -397,7 +614,7 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a for (i = 0; i < addrlen; i++) { __be32 xb = a1[i] ^ a2[i]; if (xb) - return i * 32 + 32 - fls(ntohl(xb)); + return i * 32 + 31 - __fls(ntohl(xb)); } /* @@ -416,7 +633,34 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a * if 
returned value is greater than prefix length. * --ANK (980803) */ - return (addrlen << 5); + return addrlen << 5; +} + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 +static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen) +{ + const __be64 *a1 = token1, *a2 = token2; + int i; + + addrlen >>= 3; + + for (i = 0; i < addrlen; i++) { + __be64 xb = a1[i] ^ a2[i]; + if (xb) + return i * 64 + 63 - __fls(be64_to_cpu(xb)); + } + + return addrlen << 6; +} +#endif + +static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + if (__builtin_constant_p(addrlen) && !(addrlen & 7)) + return __ipv6_addr_diff64(token1, token2, addrlen); +#endif + return __ipv6_addr_diff32(token1, token2, addrlen); } static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2) @@ -424,6 +668,45 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } +int ip6_dst_hoplimit(struct dst_entry *dst); + +static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, + struct dst_entry *dst) +{ + int hlimit; + + if (ipv6_addr_is_multicast(&fl6->daddr)) + hlimit = np->mcast_hops; + else + hlimit = np->hop_limit; + if (hlimit < 0) + hlimit = ip6_dst_hoplimit(dst); + return hlimit; +} + +/* + * Header manipulation + */ +static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass, + __be32 flowlabel) +{ + *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel; +} + +static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr) +{ + return *(__be32 *)hdr & IPV6_FLOWINFO_MASK; +} + +static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr) +{ + return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK; +} + +static inline u8 ip6_tclass(__be32 flowinfo) +{ + return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT; +} /* * Prototypes exported by ipv6 */ @@ -432,140 +715,113 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add * rcv function (called from netdevice level) */ -extern int ipv6_rcv(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev); +int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); -extern int ip6_rcv_finish(struct sk_buff *skb); +int ip6_rcv_finish(struct sk_buff *skb); /* * upper-layer output functions */ -extern int ip6_xmit(struct sock *sk, - struct sk_buff *skb, - struct flowi *fl, - struct ipv6_txoptions *opt, - int ipfragok); - -extern int ip6_nd_hdr(struct sock *sk, - struct sk_buff *skb, - struct net_device *dev, - struct in6_addr *saddr, - struct in6_addr *daddr, - int proto, int len); - -extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); - -extern int ip6_append_data(struct sock *sk, - int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, - int length, - int transhdrlen, - int hlimit, - int tclass, - struct ipv6_txoptions *opt, - struct flowi *fl, - struct rt6_info *rt, - unsigned int flags); - -extern int ip6_push_pending_frames(struct sock *sk); - -extern void ip6_flush_pending_frames(struct sock *sk); - -extern int ip6_dst_lookup(struct sock *sk, - struct dst_entry **dst, - struct flowi *fl); -extern int ip6_dst_blackhole(struct sock *sk, - struct dst_entry **dst, - struct flowi *fl); 
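A minimal standalone sketch of the flow-label helpers added above (ip6_flow_hdr(), ip6_flowinfo(), ip6_flowlabel(), ip6_tclass()), which pack and unpack the first 32-bit word of the IPv6 header: 4-bit version, 8-bit traffic class shifted by IPV6_TCLASS_SHIFT (20), and a 20-bit flow label, all kept in network byte order. The userspace code below only mirrors that bit layout so the masks can be checked outside the kernel; the demo_* names are illustrative and not part of this patch.

/*
 * Illustration only, not part of this patch: userspace re-creation of the
 * ip6_flow_hdr()/ip6_tclass() bit layout shown in the hunk above.
 * First IPv6 word = version (4 bits) | traffic class (8) | flow label (20).
 */
#include <arpa/inet.h>	/* htonl/ntohl */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FLOWINFO_MASK	htonl(0x0FFFFFFF)
#define DEMO_FLOWLABEL_MASK	htonl(0x000FFFFF)
#define DEMO_TCLASS_MASK	(DEMO_FLOWINFO_MASK & ~DEMO_FLOWLABEL_MASK)
#define DEMO_TCLASS_SHIFT	20

/* Same packing as ip6_flow_hdr(): version 6, traffic class, flow label. */
static uint32_t demo_flow_hdr(unsigned int tclass, uint32_t flowlabel_be)
{
	return htonl(0x60000000 | (tclass << DEMO_TCLASS_SHIFT)) | flowlabel_be;
}

/* Same extraction as ip6_tclass(). */
static uint8_t demo_tclass(uint32_t flowinfo_be)
{
	return ntohl(flowinfo_be & DEMO_TCLASS_MASK) >> DEMO_TCLASS_SHIFT;
}

int main(void)
{
	uint32_t label = htonl(0x12345);		/* 20-bit flow label */
	uint32_t word = demo_flow_hdr(0xb8, label);	/* tclass 0xb8 = DSCP 46, ECN 0 */

	assert(demo_tclass(word) == 0xb8);
	assert((word & DEMO_FLOWLABEL_MASK) == label);
	printf("first IPv6 word: 0x%08x\n", ntohl(word));
	return 0;
}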
-extern int ip6_sk_dst_lookup(struct sock *sk, - struct dst_entry **dst, - struct flowi *fl); +int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, + struct ipv6_txoptions *opt, int tclass); + +int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); + +int ip6_append_data(struct sock *sk, + int getfrag(void *from, char *to, int offset, int len, + int odd, struct sk_buff *skb), + void *from, int length, int transhdrlen, int hlimit, + int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, + struct rt6_info *rt, unsigned int flags, int dontfrag); + +int ip6_push_pending_frames(struct sock *sk); + +void ip6_flush_pending_frames(struct sock *sk); + +int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); +struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst); +struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst); +struct dst_entry *ip6_blackhole_route(struct net *net, + struct dst_entry *orig_dst); /* * skb processing functions */ -extern int ip6_output(struct sk_buff *skb); -extern int ip6_forward(struct sk_buff *skb); -extern int ip6_input(struct sk_buff *skb); -extern int ip6_mc_input(struct sk_buff *skb); +int ip6_output(struct sock *sk, struct sk_buff *skb); +int ip6_forward(struct sk_buff *skb); +int ip6_input(struct sk_buff *skb); +int ip6_mc_input(struct sk_buff *skb); -extern int __ip6_local_out(struct sk_buff *skb); -extern int ip6_local_out(struct sk_buff *skb); +int __ip6_local_out(struct sk_buff *skb); +int ip6_local_out(struct sk_buff *skb); /* * Extension header (options) processing */ -extern u8 * ipv6_build_nfrag_opts(struct sk_buff *skb, - u8 *prev_hdr, - struct ipv6_txoptions *opt, - struct in6_addr *daddr, - u32 jumbolen); -extern u8 * ipv6_build_frag_opts(struct sk_buff *skb, - u8 *prev_hdr, - struct ipv6_txoptions *opt); -extern void ipv6_push_nfrag_opts(struct sk_buff *skb, - struct ipv6_txoptions *opt, - u8 *proto, - struct in6_addr **daddr_p); -extern void ipv6_push_frag_opts(struct sk_buff *skb, - struct ipv6_txoptions *opt, - u8 *proto); - -extern int ipv6_skip_exthdr(const struct sk_buff *, int start, - u8 *nexthdrp); - -extern int ipv6_ext_hdr(u8 nexthdr); - -extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); +void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, + u8 *proto, struct in6_addr **daddr_p); +void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, + u8 *proto); + +int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, + __be16 *frag_offp); + +bool ipv6_ext_hdr(u8 nexthdr); + +enum { + IP6_FH_F_FRAG = (1 << 0), + IP6_FH_F_AUTH = (1 << 1), + IP6_FH_F_SKIP_RH = (1 << 2), +}; + +/* find specified header and get offset to it */ +int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, + unsigned short *fragoff, int *fragflg); + +int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); + +struct in6_addr *fl6_update_dst(struct flowi6 *fl6, + const struct ipv6_txoptions *opt, + struct in6_addr *orig); /* * socket options (ipv6_sockglue.c) */ -extern int ipv6_setsockopt(struct sock *sk, int level, - int optname, - char __user *optval, - int optlen); -extern int ipv6_getsockopt(struct sock *sk, int level, - int optname, - char __user *optval, - int __user *optlen); -extern int compat_ipv6_setsockopt(struct sock *sk, - int level, - int optname, - char __user *optval, - int optlen); -extern int 
compat_ipv6_getsockopt(struct sock *sk, - int level, - int optname, - char __user *optval, - int __user *optlen); - -extern int ipv6_packet_init(void); - -extern void ipv6_packet_cleanup(void); - -extern int ip6_datagram_connect(struct sock *sk, - struct sockaddr *addr, int addr_len); - -extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); -extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, - u32 info, u8 *payload); -extern void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info); - -extern int inet6_release(struct socket *sock); -extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr, - int addr_len); -extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr, - int *uaddr_len, int peer); -extern int inet6_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg); - -extern int inet6_hash_connect(struct inet_timewait_death_row *death_row, +int ipv6_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +int ipv6_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); + +int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); +int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, + int addr_len); + +int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); +int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); +void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, + u32 info, u8 *payload); +void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); +void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); + +int inet6_release(struct socket *sock); +int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, + int peer); +int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); + +int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); /* @@ -577,51 +833,42 @@ extern const struct proto_ops inet6_dgram_ops; struct group_source_req; struct group_filter; -extern int ip6_mc_source(int add, int omode, struct sock *sk, - struct group_source_req *pgsr); -extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf); -extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, - struct group_filter __user *optval, - int __user *optlen); +int ip6_mc_source(int add, int omode, struct sock *sk, + struct group_source_req *pgsr); +int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf); +int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, + struct group_filter __user *optval, int __user *optlen); #ifdef CONFIG_PROC_FS -extern int ac6_proc_init(void); -extern void ac6_proc_exit(void); -extern int raw6_proc_init(void); -extern void raw6_proc_exit(void); -extern int tcp6_proc_init(void); -extern void tcp6_proc_exit(void); -extern int udp6_proc_init(void); -extern void udp6_proc_exit(void); -extern int udplite6_proc_init(void); -extern void udplite6_proc_exit(void); -extern int ipv6_misc_proc_init(void); -extern void ipv6_misc_proc_exit(void); -extern int 
snmp6_register_dev(struct inet6_dev *idev); -extern int snmp6_unregister_dev(struct inet6_dev *idev); - -extern struct rt6_statistics rt6_stats; -#else -static inline int snmp6_register_dev(struct inet6_dev *idev) -{ - return 0; -} +int ac6_proc_init(struct net *net); +void ac6_proc_exit(struct net *net); +int raw6_proc_init(void); +void raw6_proc_exit(void); +int tcp6_proc_init(struct net *net); +void tcp6_proc_exit(struct net *net); +int udp6_proc_init(struct net *net); +void udp6_proc_exit(struct net *net); +int udplite6_proc_init(void); +void udplite6_proc_exit(void); +int ipv6_misc_proc_init(void); +void ipv6_misc_proc_exit(void); +int snmp6_register_dev(struct inet6_dev *idev); +int snmp6_unregister_dev(struct inet6_dev *idev); -static inline int snmp6_unregister_dev(struct inet6_dev *idev) -{ - return 0; -} +#else +static inline int ac6_proc_init(struct net *net) { return 0; } +static inline void ac6_proc_exit(struct net *net) { } +static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; } +static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } #endif #ifdef CONFIG_SYSCTL -extern ctl_table ipv6_route_table_template[]; -extern ctl_table ipv6_icmp_table_template[]; +extern struct ctl_table ipv6_route_table_template[]; -extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); -extern struct ctl_table *ipv6_route_sysctl_init(struct net *net); -extern int ipv6_sysctl_register(void); -extern void ipv6_sysctl_unregister(void); +struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); +struct ctl_table *ipv6_route_sysctl_init(struct net *net); +int ipv6_sysctl_register(void); +void ipv6_sysctl_unregister(void); #endif -#endif /* __KERNEL__ */ #endif /* _NET_IPV6_H */ diff --git a/include/net/ipx.h b/include/net/ipx.h index 4cc0b4eca94..0143180fecc 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -13,6 +13,7 @@ #include <net/datalink.h> #include <linux/ipx.h> #include <linux/list.h> +#include <linux/slab.h> struct ipx_address { __be32 net; @@ -26,9 +27,9 @@ struct ipx_address { #define IPX_MAX_PPROP_HOPS 8 struct ipxhdr { - __be16 ipx_checksum __attribute__ ((packed)); -#define IPX_NO_CHECKSUM __constant_htons(0xFFFF) - __be16 ipx_pktsize __attribute__ ((packed)); + __be16 ipx_checksum __packed; +#define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF) + __be16 ipx_pktsize __packed; __u8 ipx_tctrl; __u8 ipx_type; #define IPX_TYPE_UNKNOWN 0x00 @@ -37,8 +38,8 @@ struct ipxhdr { #define IPX_TYPE_SPX 0x05 /* SPX protocol */ #define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */ #define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */ - struct ipx_address ipx_dest __attribute__ ((packed)); - struct ipx_address ipx_source __attribute__ ((packed)); + struct ipx_address ipx_dest __packed; + struct ipx_address ipx_source __packed; }; static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) @@ -79,7 +80,6 @@ struct ipx_route { atomic_t refcnt; }; -#ifdef __KERNEL__ struct ipx_cb { u8 ipx_tctrl; __be32 ipx_dest_net; @@ -115,7 +115,6 @@ static inline struct ipx_sock *ipx_sk(struct sock *sk) } #define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0])) -#endif #define IPX_MIN_EPHEMERAL_SOCKET 0x4000 #define IPX_MAX_EPHEMERAL_SOCKET 0x7fff @@ -124,23 +123,34 @@ extern struct list_head ipx_routes; extern rwlock_t ipx_routes_lock; extern struct list_head ipx_interfaces; -extern struct ipx_interface *ipx_interfaces_head(void); +struct ipx_interface *ipx_interfaces_head(void); extern spinlock_t ipx_interfaces_lock; extern struct ipx_interface 
*ipx_primary_net; -extern int ipx_proc_init(void); -extern void ipx_proc_exit(void); +int ipx_proc_init(void); +void ipx_proc_exit(void); -extern const char *ipx_frame_name(__be16); -extern const char *ipx_device_name(struct ipx_interface *intrfc); +const char *ipx_frame_name(__be16); +const char *ipx_device_name(struct ipx_interface *intrfc); static __inline__ void ipxitf_hold(struct ipx_interface *intrfc) { atomic_inc(&intrfc->refcnt); } -extern void ipxitf_down(struct ipx_interface *intrfc); +void ipxitf_down(struct ipx_interface *intrfc); +struct ipx_interface *ipxitf_find_using_net(__be32 net); +int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node); +__be16 ipx_cksum(struct ipxhdr *packet, int length); +int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, + unsigned char *node); +void ipxrtr_del_routes(struct ipx_interface *intrfc); +int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, + struct iovec *iov, size_t len, int noblock); +int ipxrtr_route_skb(struct sk_buff *skb); +struct ipx_route *ipxrtr_lookup(__be32 net); +int ipxrtr_ioctl(unsigned int cmd, void __user *arg); static __inline__ void ipxitf_put(struct ipx_interface *intrfc) { diff --git a/include/net/irda/discovery.h b/include/net/irda/discovery.h index e4efad1f9ef..63ae3253056 100644 --- a/include/net/irda/discovery.h +++ b/include/net/irda/discovery.h @@ -23,9 +23,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ @@ -57,9 +55,6 @@ typedef union { __u8 byte[2]; } __u16_host_order; -/* Same purpose, different application */ -#define u16ho(array) (* ((__u16 *) array)) - /* Types of discovery */ typedef enum { DISCOVERY_LOG, /* What's in our discovery log */ diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h index 69b610acd2d..2a580ce9eda 100644 --- a/include/net/irda/ircomm_core.h +++ b/include/net/irda/ircomm_core.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ diff --git a/include/net/irda/ircomm_event.h b/include/net/irda/ircomm_event.h index c290447872d..5bbc32998d5 100644 --- a/include/net/irda/ircomm_event.h +++ b/include/net/irda/ircomm_event.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ @@ -74,7 +72,7 @@ struct ircomm_info { struct qos_info *qos; }; -extern char *ircomm_state[]; +extern const char *const ircomm_state[]; struct ircomm_cb; /* Forward decl. 
*/ diff --git a/include/net/irda/ircomm_lmp.h b/include/net/irda/ircomm_lmp.h index ae02106be59..5042a5021a0 100644 --- a/include/net/irda/ircomm_lmp.h +++ b/include/net/irda/ircomm_lmp.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ diff --git a/include/net/irda/ircomm_param.h b/include/net/irda/ircomm_param.h index e6678800c41..1f67432321c 100644 --- a/include/net/irda/ircomm_param.h +++ b/include/net/irda/ircomm_param.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ diff --git a/include/net/irda/ircomm_ttp.h b/include/net/irda/ircomm_ttp.h index 403081ed725..c5627288bca 100644 --- a/include/net/irda/ircomm_ttp.h +++ b/include/net/irda/ircomm_ttp.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h index eea2e615238..8d4f588974b 100644 --- a/include/net/irda/ircomm_tty.h +++ b/include/net/irda/ircomm_tty.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ @@ -52,21 +50,16 @@ /* Same for payload size. 
See qos.c for the smallest max data size */ #define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED) -/* Those are really defined in include/linux/serial.h - Jean II */ -#define ASYNC_B_INITIALIZED 31 /* Serial port was initialized */ -#define ASYNC_B_NORMAL_ACTIVE 29 /* Normal device is active */ -#define ASYNC_B_CLOSING 27 /* Serial port is closing */ - /* * IrCOMM TTY driver state */ struct ircomm_tty_cb { irda_queue_t queue; /* Must be first */ + struct tty_port port; magic_t magic; int state; /* Connect state */ - struct tty_struct *tty; struct ircomm_cb *ircomm; /* IrCOMM layer instance */ struct sk_buff *tx_skb; /* Transmit buffer */ @@ -80,7 +73,6 @@ struct ircomm_tty_cb { LOCAL_FLOW flow; /* IrTTP flow status */ int line; - unsigned long flags; __u8 dlsap_sel; __u8 slsap_sel; @@ -97,19 +89,10 @@ struct ircomm_tty_cb { void *skey; void *ckey; - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; struct timer_list watchdog_timer; struct work_struct tqueue; - unsigned short close_delay; - unsigned short closing_wait; /* time to wait before closing */ - - int open_count; - int blocked_open; /* # of blocked opens */ - /* Protect concurent access to : - * o self->open_count * o self->ctrl_skb * o self->tx_skb * Maybe other things may gain to be protected as well... @@ -120,13 +103,13 @@ struct ircomm_tty_cb { void ircomm_tty_start(struct tty_struct *tty); void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self); -extern int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file); -extern int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file, - unsigned int set, unsigned int clear); -extern int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg); -extern void ircomm_tty_set_termios(struct tty_struct *tty, - struct ktermios *old_termios); +int ircomm_tty_tiocmget(struct tty_struct *tty); +int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, + unsigned int clear); +int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg); +void ircomm_tty_set_termios(struct tty_struct *tty, + struct ktermios *old_termios); #endif diff --git a/include/net/irda/ircomm_tty_attach.h b/include/net/irda/ircomm_tty_attach.h index f91a5695aa4..20dcbdf258c 100644 --- a/include/net/irda/ircomm_tty_attach.h +++ b/include/net/irda/ircomm_tty_attach.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
* ********************************************************************/ @@ -66,8 +64,8 @@ struct ircomm_tty_info { __u8 dlsap_sel; }; -extern char *ircomm_state[]; -extern char *ircomm_tty_state[]; +extern const char *const ircomm_state[]; +extern const char *const ircomm_tty_state[]; int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index 08387553b57..a059465101f 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -53,10 +53,6 @@ typedef __u32 magic_t; #ifndef IRDA_ALIGN # define IRDA_ALIGN __attribute__((aligned)) #endif -#ifndef IRDA_PACK -# define IRDA_PACK __attribute__((packed)) -#endif - #ifdef CONFIG_IRDA_DEBUG @@ -72,7 +68,7 @@ do { if (irda_debug >= (n)) \ #define IRDA_ASSERT(expr, func) \ do { if(!(expr)) { \ printk( "Assertion failed! %s:%s:%d %s\n", \ - __FILE__,__FUNCTION__,__LINE__,(#expr) ); \ + __FILE__,__func__,__LINE__,(#expr) ); \ func } } while (0) #define IRDA_ASSERT_LABEL(label) label #else @@ -116,20 +112,19 @@ do { if(!(expr)) { \ struct net_device; struct packet_type; -extern void irda_proc_register(void); -extern void irda_proc_unregister(void); +void irda_proc_register(void); +void irda_proc_unregister(void); -extern int irda_sysctl_register(void); -extern void irda_sysctl_unregister(void); +int irda_sysctl_register(void); +void irda_sysctl_unregister(void); -extern int irsock_init(void); -extern void irsock_cleanup(void); +int irsock_init(void); +void irsock_cleanup(void); -extern int irda_nl_register(void); -extern void irda_nl_unregister(void); +int irda_nl_register(void); +void irda_nl_unregister(void); -extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev); +int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev); #endif /* NET_IRDA_H */ diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h index f70e9b39eba..664bf817841 100644 --- a/include/net/irda/irda_device.h +++ b/include/net/irda/irda_device.h @@ -24,9 +24,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ @@ -135,9 +133,11 @@ struct dongle_reg { /* * Per-packet information we need to hide inside sk_buff - * (must not exceed 48 bytes, check with struct sk_buff) + * (must not exceed 48 bytes, check with struct sk_buff) + * The default_qdisc_pad field is a temporary hack. */ struct irda_skb_cb { + unsigned int default_qdisc_pad; magic_t magic; /* Be sure that we can trust the information */ __u32 next_speed; /* The Speed to be set *after* this frame */ __u16 mtt; /* Minimum turn around time */ @@ -160,7 +160,7 @@ typedef struct { int irq, irq2; /* Interrupts used */ int dma, dma2; /* DMA channel(s) used */ int fifo_size; /* FIFO size */ - int irqflags; /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */ + int irqflags; /* interrupt flags (ie, IRQF_SHARED) */ int direction; /* Link direction, used by some FIR drivers */ int enabled; /* Powered on? 
*/ int suspended; /* Suspended by APM */ @@ -223,7 +223,7 @@ int irda_device_is_receiving(struct net_device *dev); /* Interface for internal use */ static inline int irda_device_txqueue_empty(const struct net_device *dev) { - return skb_queue_empty(&dev->qdisc->q); + return qdisc_all_tx_empty(dev); } int irda_device_set_raw_mode(struct net_device* self, int status); struct net_device *alloc_irdadev(int sizeof_priv); diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h index 73cacb3ac16..550c2d6ec7f 100644 --- a/include/net/irda/irlan_common.h +++ b/include/net/irda/irlan_common.h @@ -32,6 +32,7 @@ #include <linux/types.h> #include <linux/skbuff.h> #include <linux/netdevice.h> +#include <linux/if_ether.h> #include <net/irda/irttp.h> @@ -161,7 +162,7 @@ struct irlan_provider_cb { int access_type; /* Access type */ __u16 send_arb_val; - __u8 mac_address[6]; /* Generated MAC address for peer device */ + __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */ }; /* @@ -171,7 +172,6 @@ struct irlan_cb { int magic; struct list_head dev_list; struct net_device *dev; /* Ethernet device structure*/ - struct net_device_stats stats; __u32 saddr; /* Source device address */ __u32 daddr; /* Destination device address */ diff --git a/include/net/irda/irlan_eth.h b/include/net/irda/irlan_eth.h index 0062347600b..de5c81691f3 100644 --- a/include/net/irda/irlan_eth.h +++ b/include/net/irda/irlan_eth.h @@ -29,5 +29,4 @@ struct net_device *alloc_irlandev(const char *name); int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb); void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow); -void irlan_eth_send_gratuitous_arp(struct net_device *dev); #endif diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h index 6d9539f0580..018b5a77e61 100644 --- a/include/net/irda/irlan_event.h +++ b/include/net/irda/irlan_event.h @@ -67,7 +67,7 @@ typedef enum { IRLAN_WATCHDOG_TIMEOUT, } IRLAN_EVENT; -extern char *irlan_state[]; +extern const char * const irlan_state[]; void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h index 9d0c78ea92f..fb4b76d5d7f 100644 --- a/include/net/irda/irlap.h +++ b/include/net/irda/irlap.h @@ -204,7 +204,7 @@ struct irlap_cb { notify_t notify; /* Callbacks to IrLMP */ - int mtt_required; /* Minumum turnaround time required */ + int mtt_required; /* Minimum turnaround time required */ int xbofs_delay; /* Nr of XBOF's used to MTT */ int bofs_count; /* Negotiated extra BOFs */ int next_bofs; /* Negotiated extra BOFs after next frame */ @@ -282,7 +282,7 @@ static inline int irlap_is_primary(struct irlap_cb *self) default: ret = -1; } - return(ret); + return ret; } /* Clear a pending IrLAP disconnect. - Jean II */ diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h index 2ae2e119ef4..e4325fee126 100644 --- a/include/net/irda/irlap_event.h +++ b/include/net/irda/irlap_event.h @@ -25,9 +25,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
* ********************************************************************/ @@ -120,12 +118,12 @@ typedef enum { /* FIXME check the two first reason codes */ LAP_PRIMARY_CONFLICT, } LAP_REASON; -extern const char *irlap_state[]; +extern const char *const irlap_state[]; void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); void irlap_print_event(IRLAP_EVENT event); -extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb); +int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb); #endif diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h index 641f88e848b..cbc12a926e5 100644 --- a/include/net/irda/irlap_frame.h +++ b/include/net/irda/irlap_frame.h @@ -24,9 +24,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * ********************************************************************/ @@ -85,7 +83,7 @@ struct discovery_t; struct disc_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct xid_frame { __u8 caddr; /* Connection address */ @@ -96,41 +94,41 @@ struct xid_frame { __u8 flags; /* Discovery flags */ __u8 slotnr; __u8 version; -} IRDA_PACK; +} __packed; struct test_frame { __u8 caddr; /* Connection address */ __u8 control; __le32 saddr; /* Source device address */ __le32 daddr; /* Destination device address */ -} IRDA_PACK; +} __packed; struct ua_frame { __u8 caddr; __u8 control; __le32 saddr; /* Source device address */ __le32 daddr; /* Dest device address */ -} IRDA_PACK; +} __packed; struct dm_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct rd_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct rr_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct i_frame { __u8 caddr; __u8 control; -} IRDA_PACK; +} __packed; struct snrm_frame { __u8 caddr; @@ -138,7 +136,7 @@ struct snrm_frame { __le32 saddr; __le32 daddr; __u8 ncaddr; -} IRDA_PACK; +} __packed; void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb); void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s, @@ -163,7 +161,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command); void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 caddr, int command); -extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self, - struct sk_buff *skb); +int irlap_insert_qos_negotiation_params(struct irlap_cb *self, + struct sk_buff *skb); #endif diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h index 3ffc1d0f93d..f132924cc9d 100644 --- a/include/net/irda/irlmp.h +++ b/include/net/irda/irlmp.h @@ -134,7 +134,7 @@ typedef struct { } CACHE_ENTRY; /* - * Information about each registred IrLAP layer + * Information about each registered IrLAP layer */ struct lap_cb { irda_queue_t queue; /* Must be first */ @@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self) return (self && self->lap) ? 
self->lap->daddr : 0; } -extern const char *irlmp_reasons[]; +const char *irlmp_reason_str(LM_REASON reason); + extern int sysctl_discovery_timeout; extern int sysctl_discovery_slots; extern int sysctl_discovery; @@ -274,11 +275,11 @@ static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self) if (self->lap->irlap == NULL) return 0; - return(IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD); + return IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD; } /* After doing a irlmp_dup(), this get one of the two socket back into - * a state where it's waiting incomming connections. + * a state where it's waiting incoming connections. * Note : this can be used *only* if the socket is not yet connected * (i.e. NO irlmp_connect_response() done on this socket). * - Jean II */ diff --git a/include/net/irda/irlmp_event.h b/include/net/irda/irlmp_event.h index e03ae4ae396..9e4ec17a744 100644 --- a/include/net/irda/irlmp_event.h +++ b/include/net/irda/irlmp_event.h @@ -79,8 +79,8 @@ typedef enum { LM_LAP_IDLE_TIMEOUT, } IRLMP_EVENT; -extern const char *irlmp_state[]; -extern const char *irlsap_state[]; +extern const char *const irlmp_state[]; +extern const char *const irlsap_state[]; void irlmp_watchdog_timer_expired(void *data); void irlmp_discovery_timer_expired(void *data); diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h index 0788c23d282..98682d4bae8 100644 --- a/include/net/irda/irttp.h +++ b/include/net/irda/irttp.h @@ -97,7 +97,7 @@ #define TTP_MAX_SDU_SIZE 0x01 /* - * This structure contains all data assosiated with one instance of a TTP + * This structure contains all data associated with one instance of a TTP * connection. */ struct tsap_cb { @@ -185,7 +185,7 @@ static inline __u32 irttp_get_max_seg_size(struct tsap_cb *self) } /* After doing a irttp_dup(), this get one of the two socket back into - * a state where it's waiting incomming connections. + * a state where it's waiting incoming connections. * Note : this can be used *only* if the socket is not yet connected * (i.e. NO irttp_connect_response() done on this socket). * - Jean II */ @@ -204,7 +204,7 @@ static inline int irttp_is_primary(struct tsap_cb *self) (self->lsap->lap == NULL) || (self->lsap->lap->irlap == NULL)) return -2; - return(irlap_is_primary(self->lsap->lap->irlap)); + return irlap_is_primary(self->lsap->lap->irlap); } #endif /* IRTTP_H */ diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h index c0d938847bd..42713c931d1 100644 --- a/include/net/irda/parameters.h +++ b/include/net/irda/parameters.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Michel Dänzer <daenzer@debian.org>, 10/2001 * - simplify irda_pv_t to avoid endianness issues diff --git a/include/net/irda/qos.h b/include/net/irda/qos.h index cc577dc0a0e..05a5a249956 100644 --- a/include/net/irda/qos.h +++ b/include/net/irda/qos.h @@ -22,9 +22,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
* ********************************************************************/ diff --git a/include/net/irda/wrapper.h b/include/net/irda/wrapper.h index 2942ad6ab93..eef53ebe3d7 100644 --- a/include/net/irda/wrapper.h +++ b/include/net/irda/wrapper.h @@ -42,7 +42,7 @@ #define IRDA_TRANS 0x20 /* Asynchronous transparency modifier */ -/* States for receving a frame in async mode */ +/* States for receiving a frame in async mode */ enum { OUTSIDE_FRAME, BEGIN_FRAME, diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index 85f80eadfa3..714cc9a54a4 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -14,6 +14,7 @@ #include <linux/list.h> #include <linux/poll.h> #include <linux/socket.h> +#include <net/iucv/iucv.h> #ifndef AF_IUCV #define AF_IUCV 32 @@ -26,13 +27,13 @@ enum { IUCV_OPEN, IUCV_BOUND, IUCV_LISTEN, - IUCV_SEVERED, IUCV_DISCONN, IUCV_CLOSING, IUCV_CLOSED }; #define IUCV_QUEUELEN_DEFAULT 65535 +#define IUCV_HIPER_MSGLIM_DEFAULT 128 #define IUCV_CONN_TIMEOUT (HZ * 40) #define IUCV_DISCONN_TIMEOUT (HZ * 2) #define IUCV_CONN_IDLE_TIMEOUT (HZ * 60) @@ -57,8 +58,52 @@ struct sock_msg_q { spinlock_t lock; }; +#define AF_IUCV_FLAG_ACK 0x1 +#define AF_IUCV_FLAG_SYN 0x2 +#define AF_IUCV_FLAG_FIN 0x4 +#define AF_IUCV_FLAG_WIN 0x8 +#define AF_IUCV_FLAG_SHT 0x10 + +struct af_iucv_trans_hdr { + u16 magic; + u8 version; + u8 flags; + u16 window; + char destNodeID[8]; + char destUserID[8]; + char destAppName[16]; + char srcNodeID[8]; + char srcUserID[8]; + char srcAppName[16]; /* => 70 bytes */ + struct iucv_message iucv_hdr; /* => 33 bytes */ + u8 pad; /* total 104 bytes */ +} __packed; + +enum iucv_tx_notify { + /* transmission of skb is completed and was successful */ + TX_NOTIFY_OK = 0, + /* target is unreachable */ + TX_NOTIFY_UNREACHABLE = 1, + /* transfer pending queue full */ + TX_NOTIFY_TPQFULL = 2, + /* general error */ + TX_NOTIFY_GENERALERROR = 3, + /* transmission of skb is pending - may interleave + * with TX_NOTIFY_DELAYED_* */ + TX_NOTIFY_PENDING = 4, + /* transmission of skb was done successfully (delayed) */ + TX_NOTIFY_DELAYED_OK = 5, + /* target unreachable (detected delayed) */ + TX_NOTIFY_DELAYED_UNREACHABLE = 6, + /* general error (detected delayed) */ + TX_NOTIFY_DELAYED_GENERALERROR = 7, +}; + #define iucv_sk(__sk) ((struct iucv_sock *) __sk) +#define AF_IUCV_TRANS_IUCV 0 +#define AF_IUCV_TRANS_HIPER 1 + struct iucv_sock { struct sock sk; char src_user_id[8]; @@ -69,12 +114,38 @@ struct iucv_sock { spinlock_t accept_q_lock; struct sock *parent; struct iucv_path *path; + struct net_device *hs_dev; struct sk_buff_head send_skb_q; struct sk_buff_head backlog_skb_q; struct sock_msg_q message_q; unsigned int send_tag; + u8 flags; + u16 msglimit; + u16 msglimit_peer; + atomic_t msg_sent; + atomic_t msg_recv; + atomic_t pendings; + int transport; + void (*sk_txnotify)(struct sk_buff *skb, + enum iucv_tx_notify n); }; +struct iucv_skb_cb { + u32 class; /* target class of message */ + u32 tag; /* tag associated with message */ + u32 offset; /* offset for skb receival */ +}; + +#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0])) + +/* iucv socket options (SOL_IUCV) */ +#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ +#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ +#define SO_MSGSIZE 0x0800 /* get maximum msgsize */ + +/* iucv related control messages (scm) */ +#define SCM_IUCV_TRGCLS 0x0001 /* target class control message */ + struct iucv_sock_list { struct hlist_head head; rwlock_t lock; @@ -85,9 +156,6 @@ 
unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); -int iucv_sock_wait_state(struct sock *sk, int state, int state2, - unsigned long timeo); -int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo); void iucv_accept_enqueue(struct sock *parent, struct sock *sk); void iucv_accept_unlink(struct sock *sk); struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock); diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h index fd70adbb356..0894ced3195 100644 --- a/include/net/iucv/iucv.h +++ b/include/net/iucv/iucv.h @@ -28,6 +28,7 @@ */ #include <linux/types.h> +#include <linux/slab.h> #include <asm/debug.h> /* @@ -119,7 +120,7 @@ struct iucv_message { u32 reply_size; u8 rmmsg[8]; u8 flags; -}; +} __packed; /* * struct iucv_handler @@ -172,7 +173,7 @@ struct iucv_handler { /* * The message_pending function is called after an icuv interrupt * type 0x06 or type 0x07 has been received. A new message is - * availabe and can be received with iucv_message_receive. + * available and can be received with iucv_message_receive. */ void (*message_pending)(struct iucv_path *, struct iucv_message *); /* @@ -337,12 +338,35 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, * established paths. This function will deal with RMDATA messages * embedded in struct iucv_message as well. * + * Locking: local_bh_enable/local_bh_disable + * * Returns the result from the CP IUCV call. */ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *buffer, size_t size, size_t *residual); /** + * __iucv_message_receive + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: flags that affect how the message is received (IUCV_IPBUFLST) + * @buffer: address of data buffer or address of struct iucv_array + * @size: length of data buffer + * @residual: + * + * This function receives messages that are being sent to you over + * established paths. This function will deal with RMDATA messages + * embedded in struct iucv_message as well. + * + * Locking: no locking. + * + * Returns the result from the CP IUCV call. + */ +int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, + u8 flags, void *buffer, size_t size, + size_t *residual); + +/** * iucv_message_reject * @path: address of iucv path structure * @msg: address of iucv msg structure @@ -386,12 +410,34 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, * transmitted is in a buffer and this is a one-way message and the * receiver will not reply to the message. * + * Locking: local_bh_enable/local_bh_disable + * * Returns the result from the CP IUCV call. */ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size); /** + * __iucv_message_send + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) + * @srccls: source class of message + * @buffer: address of data buffer or address of struct iucv_array + * @size: length of send buffer + * + * This function transmits data to another application. Data to be + * transmitted is in a buffer and this is a one-way message and the + * receiver will not reply to the message. + * + * Locking: no locking. 
+ * + * Returns the result from the CP IUCV call. + */ +int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, + u8 flags, u32 srccls, void *buffer, size_t size); + +/** * iucv_message_send2way * @path: address of iucv path structure * @msg: address of iucv msg structure @@ -413,3 +459,37 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size, void *answer, size_t asize, size_t *residual); + +struct iucv_interface { + int (*message_receive)(struct iucv_path *path, struct iucv_message *msg, + u8 flags, void *buffer, size_t size, size_t *residual); + int (*__message_receive)(struct iucv_path *path, + struct iucv_message *msg, u8 flags, void *buffer, size_t size, + size_t *residual); + int (*message_reply)(struct iucv_path *path, struct iucv_message *msg, + u8 flags, void *reply, size_t size); + int (*message_reject)(struct iucv_path *path, struct iucv_message *msg); + int (*message_send)(struct iucv_path *path, struct iucv_message *msg, + u8 flags, u32 srccls, void *buffer, size_t size); + int (*__message_send)(struct iucv_path *path, struct iucv_message *msg, + u8 flags, u32 srccls, void *buffer, size_t size); + int (*message_send2way)(struct iucv_path *path, + struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, + size_t size, void *answer, size_t asize, size_t *residual); + int (*message_purge)(struct iucv_path *path, struct iucv_message *msg, + u32 srccls); + int (*path_accept)(struct iucv_path *path, struct iucv_handler *handler, + u8 userdata[16], void *private); + int (*path_connect)(struct iucv_path *path, + struct iucv_handler *handler, + u8 userid[8], u8 system[8], u8 userdata[16], void *private); + int (*path_quiesce)(struct iucv_path *path, u8 userdata[16]); + int (*path_resume)(struct iucv_path *path, u8 userdata[16]); + int (*path_sever)(struct iucv_path *path, u8 userdata[16]); + int (*iucv_register)(struct iucv_handler *handler, int smp); + void (*iucv_unregister)(struct iucv_handler *handler, int smp); + struct bus_type *bus; + struct device *root; +}; + +extern struct iucv_interface iucv_if; diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 369d50e08b9..a830b01baba 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -91,7 +91,7 @@ * -------------------- * The implementation goals were as follow : * o Obvious : you should not need a PhD to understand what's happening, - * the benefit is easier maintainance. + * the benefit is easier maintenance. * o Flexible : it should accommodate a wide variety of driver * implementations and be as flexible as the old API. 
* o Lean : it should be efficient memory wise to minimise the impact @@ -129,7 +129,7 @@ * * Functions prototype uses union iwreq_data * ----------------------------------------- - * Some would have prefered functions defined this way : + * Some would have preferred functions defined this way : * static int mydriver_ioctl_setrate(struct net_device *dev, * long rate, int auto) * 1) The kernel code doesn't "validate" the content of iwreq_data, and @@ -256,7 +256,7 @@ #define EIWCOMMIT EINPROGRESS /* Flags available in struct iw_request_info */ -#define IW_REQUEST_FLAG_NONE 0x0000 /* No flag so far */ +#define IW_REQUEST_FLAG_COMPAT 0x0001 /* Compat ioctl call */ /* Type of headers we know about (basically union iwreq_data) */ #define IW_HEADER_TYPE_NULL 0 /* Not available */ @@ -300,8 +300,7 @@ * This struct is also my long term insurance. I can add new fields here * without breaking the prototype of iw_handler... */ -struct iw_request_info -{ +struct iw_request_info { __u16 cmd; /* Wireless Extension command */ __u16 flags; /* More to come ;-) */ }; @@ -321,20 +320,20 @@ typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info, * shared by all driver instances... Same for the members... * This will be linked from net_device in <linux/netdevice.h> */ -struct iw_handler_def -{ +struct iw_handler_def { + + /* Array of handlers for standard ioctls + * We will call dev->wireless_handlers->standard[ioctl - SIOCIWFIRST] + */ + const iw_handler * standard; /* Number of handlers defined (more precisely, index of the * last defined handler + 1) */ __u16 num_standard; + +#ifdef CONFIG_WEXT_PRIV __u16 num_private; /* Number of private arg description */ __u16 num_private_args; - - /* Array of handlers for standard ioctls - * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT] - */ - const iw_handler * standard; - /* Array of handlers for private ioctls * Will call dev->wireless_handlers->private[ioctl - SIOCIWFIRSTPRIV] */ @@ -344,6 +343,7 @@ struct iw_handler_def * can put it in any order you want and should not leave holes... * We will automatically export that to user space... */ const struct iw_priv_args * private_args; +#endif /* New location of get_wireless_stats, to de-bloat struct net_device. * The old pointer in struct net_device will be gradually phased @@ -370,8 +370,7 @@ struct iw_handler_def /* * Describe how a standard IOCTL looks like. */ -struct iw_ioctl_description -{ +struct iw_ioctl_description { __u8 header_type; /* NULL, iw_point or other */ __u8 token_type; /* Future */ __u16 token_size; /* Granularity of payload */ @@ -393,8 +392,7 @@ struct iw_ioctl_description /* * Instance specific spy data, i.e. addresses spied and quality for them. */ -struct iw_spy_data -{ +struct iw_spy_data { /* --- Standard spy support --- */ int spy_number; u_char spy_address[IW_MAX_SPY][ETH_ALEN]; @@ -416,13 +414,13 @@ struct iw_spy_data * data (i.e. valid as long as struct net_device exist, same locking rules). 
*/ /* Forward declaration */ -struct ieee80211_device; +struct libipw_device; /* The struct */ struct iw_public_data { /* Driver enhanced spy support */ struct iw_spy_data * spy_data; - /* Structure managed by the in-kernel IEEE 802.11 layer */ - struct ieee80211_device * ieee80211; + /* Legacy structure managed by the ipw2x00-specific IEEE 802.11 layer */ + struct libipw_device * libipw; }; /**************************** PROTOTYPES ****************************/ @@ -434,149 +432,90 @@ struct iw_public_data { /* First : function strictly used inside the kernel */ /* Handle /proc/net/wireless, called in net/code/dev.c */ -extern int dev_get_wireless_info(char * buffer, char **start, off_t offset, - int length); +int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); /* Second : functions that may be called by driver modules */ /* Send a single event to user space */ -extern void wireless_send_event(struct net_device * dev, - unsigned int cmd, - union iwreq_data * wrqu, - char * extra); +void wireless_send_event(struct net_device *dev, unsigned int cmd, + union iwreq_data *wrqu, const char *extra); /* We may need a function to send a stream of events to user space. * More on that later... */ /* Standard handler for SIOCSIWSPY */ -extern int iw_handler_set_spy(struct net_device * dev, - struct iw_request_info * info, - union iwreq_data * wrqu, - char * extra); +int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); /* Standard handler for SIOCGIWSPY */ -extern int iw_handler_get_spy(struct net_device * dev, - struct iw_request_info * info, - union iwreq_data * wrqu, - char * extra); +int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); /* Standard handler for SIOCSIWTHRSPY */ -extern int iw_handler_set_thrspy(struct net_device * dev, - struct iw_request_info *info, - union iwreq_data * wrqu, - char * extra); +int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); /* Standard handler for SIOCGIWTHRSPY */ -extern int iw_handler_get_thrspy(struct net_device * dev, - struct iw_request_info *info, - union iwreq_data * wrqu, - char * extra); +int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); /* Driver call to update spy records */ -extern void wireless_spy_update(struct net_device * dev, - unsigned char * address, - struct iw_quality * wstats); +void wireless_spy_update(struct net_device *dev, unsigned char *address, + struct iw_quality *wstats); /************************* INLINE FUNTIONS *************************/ /* * Function that are so simple that it's more efficient inlining them */ -/*------------------------------------------------------------------*/ -/* - * Wrapper to add an Wireless Event to a stream of events. 
- */ -static inline char * -iwe_stream_add_event(char * stream, /* Stream of events */ - char * ends, /* End of stream */ - struct iw_event *iwe, /* Payload */ - int event_len) /* Real size of payload */ +static inline int iwe_stream_lcp_len(struct iw_request_info *info) { - /* Check if it's possible */ - if(likely((stream + event_len) < ends)) { - iwe->len = event_len; - /* Beware of alignement issues on 64 bits */ - memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + IW_EV_LCP_LEN, - ((char *) iwe) + IW_EV_LCP_LEN, - event_len - IW_EV_LCP_LEN); - stream += event_len; - } - return stream; +#ifdef CONFIG_COMPAT + if (info->flags & IW_REQUEST_FLAG_COMPAT) + return IW_EV_COMPAT_LCP_LEN; +#endif + return IW_EV_LCP_LEN; } -/*------------------------------------------------------------------*/ -/* - * Wrapper to add an short Wireless Event containing a pointer to a - * stream of events. - */ -static inline char * -iwe_stream_add_point(char * stream, /* Stream of events */ - char * ends, /* End of stream */ - struct iw_event *iwe, /* Payload length + flags */ - char * extra) /* More payload */ +static inline int iwe_stream_point_len(struct iw_request_info *info) { - int event_len = IW_EV_POINT_LEN + iwe->u.data.length; - /* Check if it's possible */ - if(likely((stream + event_len) < ends)) { - iwe->len = event_len; - memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + IW_EV_LCP_LEN, - ((char *) iwe) + IW_EV_LCP_LEN + IW_EV_POINT_OFF, - IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length); - stream += event_len; - } - return stream; +#ifdef CONFIG_COMPAT + if (info->flags & IW_REQUEST_FLAG_COMPAT) + return IW_EV_COMPAT_POINT_LEN; +#endif + return IW_EV_POINT_LEN; } -/*------------------------------------------------------------------*/ -/* - * Wrapper to add a value to a Wireless Event in a stream of events. - * Be careful, this one is tricky to use properly : - * At the first run, you need to have (value = event + IW_EV_LCP_LEN). - */ -static inline char * -iwe_stream_add_value(char * event, /* Event in the stream */ - char * value, /* Value in event */ - char * ends, /* End of stream */ - struct iw_event *iwe, /* Payload */ - int event_len) /* Real size of payload */ +static inline int iwe_stream_event_len_adjust(struct iw_request_info *info, + int event_len) { - /* Don't duplicate LCP */ - event_len -= IW_EV_LCP_LEN; - - /* Check if it's possible */ - if(likely((value + event_len) < ends)) { - /* Add new value */ - memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len); - value += event_len; - /* Patch LCP */ - iwe->len = value - event; - memcpy(event, (char *) iwe, IW_EV_LCP_LEN); +#ifdef CONFIG_COMPAT + if (info->flags & IW_REQUEST_FLAG_COMPAT) { + event_len -= IW_EV_LCP_LEN; + event_len += IW_EV_COMPAT_LCP_LEN; } - return value; +#endif + + return event_len; } /*------------------------------------------------------------------*/ /* * Wrapper to add an Wireless Event to a stream of events. - * Same as above, with explicit error check... 
*/ static inline char * -iwe_stream_check_add_event(char * stream, /* Stream of events */ - char * ends, /* End of stream */ - struct iw_event *iwe, /* Payload */ - int event_len, /* Size of payload */ - int * perr) /* Error report */ +iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, + struct iw_event *iwe, int event_len) { - /* Check if it's possible, set error if not */ + int lcp_len = iwe_stream_lcp_len(info); + + event_len = iwe_stream_event_len_adjust(info, event_len); + + /* Check if it's possible */ if(likely((stream + event_len) < ends)) { iwe->len = event_len; /* Beware of alignement issues on 64 bits */ memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + IW_EV_LCP_LEN, - ((char *) iwe) + IW_EV_LCP_LEN, - event_len - IW_EV_LCP_LEN); + memcpy(stream + lcp_len, &iwe->u, + event_len - lcp_len); stream += event_len; - } else - *perr = -E2BIG; + } return stream; } @@ -584,27 +523,25 @@ iwe_stream_check_add_event(char * stream, /* Stream of events */ /* * Wrapper to add an short Wireless Event containing a pointer to a * stream of events. - * Same as above, with explicit error check... */ static inline char * -iwe_stream_check_add_point(char * stream, /* Stream of events */ - char * ends, /* End of stream */ - struct iw_event *iwe, /* Payload length + flags */ - char * extra, /* More payload */ - int * perr) /* Error report */ +iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, + struct iw_event *iwe, char *extra) { - int event_len = IW_EV_POINT_LEN + iwe->u.data.length; + int event_len = iwe_stream_point_len(info) + iwe->u.data.length; + int point_len = iwe_stream_point_len(info); + int lcp_len = iwe_stream_lcp_len(info); + /* Check if it's possible */ if(likely((stream + event_len) < ends)) { iwe->len = event_len; memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + IW_EV_LCP_LEN, - ((char *) iwe) + IW_EV_LCP_LEN + IW_EV_POINT_OFF, + memcpy(stream + lcp_len, + ((char *) &iwe->u) + IW_EV_POINT_OFF, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length); + memcpy(stream + point_len, extra, iwe->u.data.length); stream += event_len; - } else - *perr = -E2BIG; + } return stream; } @@ -613,29 +550,25 @@ iwe_stream_check_add_point(char * stream, /* Stream of events */ * Wrapper to add a value to a Wireless Event in a stream of events. * Be careful, this one is tricky to use properly : * At the first run, you need to have (value = event + IW_EV_LCP_LEN). - * Same as above, with explicit error check... 
*/ static inline char * -iwe_stream_check_add_value(char * event, /* Event in the stream */ - char * value, /* Value in event */ - char * ends, /* End of stream */ - struct iw_event *iwe, /* Payload */ - int event_len, /* Size of payload */ - int * perr) /* Error report */ +iwe_stream_add_value(struct iw_request_info *info, char *event, char *value, + char *ends, struct iw_event *iwe, int event_len) { + int lcp_len = iwe_stream_lcp_len(info); + /* Don't duplicate LCP */ event_len -= IW_EV_LCP_LEN; /* Check if it's possible */ if(likely((value + event_len) < ends)) { /* Add new value */ - memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len); + memcpy(value, &iwe->u, event_len); value += event_len; /* Patch LCP */ iwe->len = value - event; - memcpy(event, (char *) iwe, IW_EV_LCP_LEN); - } else - *perr = -E2BIG; + memcpy(event, (char *) iwe, lcp_len); + } return value; } diff --git a/include/net/lapb.h b/include/net/lapb.h index 96cb5ddaa9f..9510f8725f0 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -95,7 +95,7 @@ struct lapb_cb { struct sk_buff_head write_queue; struct sk_buff_head ack_queue; unsigned char window; - struct lapb_register_struct callbacks; + const struct lapb_register_struct *callbacks; /* FRMR control information */ struct lapb_frame frmr_data; @@ -105,40 +105,40 @@ struct lapb_cb { }; /* lapb_iface.c */ -extern void lapb_connect_confirmation(struct lapb_cb *lapb, int); -extern void lapb_connect_indication(struct lapb_cb *lapb, int); -extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int); -extern void lapb_disconnect_indication(struct lapb_cb *lapb, int); -extern int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *); -extern int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *); +void lapb_connect_confirmation(struct lapb_cb *lapb, int); +void lapb_connect_indication(struct lapb_cb *lapb, int); +void lapb_disconnect_confirmation(struct lapb_cb *lapb, int); +void lapb_disconnect_indication(struct lapb_cb *lapb, int); +int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *); +int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *); /* lapb_in.c */ -extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *); +void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *); /* lapb_out.c */ -extern void lapb_kick(struct lapb_cb *lapb); -extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int); -extern void lapb_establish_data_link(struct lapb_cb *lapb); -extern void lapb_enquiry_response(struct lapb_cb *lapb); -extern void lapb_timeout_response(struct lapb_cb *lapb); -extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short); -extern void lapb_check_need_response(struct lapb_cb *lapb, int, int); +void lapb_kick(struct lapb_cb *lapb); +void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int); +void lapb_establish_data_link(struct lapb_cb *lapb); +void lapb_enquiry_response(struct lapb_cb *lapb); +void lapb_timeout_response(struct lapb_cb *lapb); +void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short); +void lapb_check_need_response(struct lapb_cb *lapb, int, int); /* lapb_subr.c */ -extern void lapb_clear_queues(struct lapb_cb *lapb); -extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short); -extern void lapb_requeue_frames(struct lapb_cb *lapb); -extern int lapb_validate_nr(struct lapb_cb *lapb, unsigned short); -extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *); -extern void lapb_send_control(struct 
lapb_cb *lapb, int, int, int); -extern void lapb_transmit_frmr(struct lapb_cb *lapb); +void lapb_clear_queues(struct lapb_cb *lapb); +void lapb_frames_acked(struct lapb_cb *lapb, unsigned short); +void lapb_requeue_frames(struct lapb_cb *lapb); +int lapb_validate_nr(struct lapb_cb *lapb, unsigned short); +int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *); +void lapb_send_control(struct lapb_cb *lapb, int, int, int); +void lapb_transmit_frmr(struct lapb_cb *lapb); /* lapb_timer.c */ -extern void lapb_start_t1timer(struct lapb_cb *lapb); -extern void lapb_start_t2timer(struct lapb_cb *lapb); -extern void lapb_stop_t1timer(struct lapb_cb *lapb); -extern void lapb_stop_t2timer(struct lapb_cb *lapb); -extern int lapb_t1timer_running(struct lapb_cb *lapb); +void lapb_start_t1timer(struct lapb_cb *lapb); +void lapb_start_t2timer(struct lapb_cb *lapb); +void lapb_stop_t1timer(struct lapb_cb *lapb); +void lapb_stop_t2timer(struct lapb_cb *lapb); +int lapb_t1timer_running(struct lapb_cb *lapb); /* * Debug levels. @@ -149,4 +149,10 @@ extern int lapb_t1timer_running(struct lapb_cb *lapb); */ #define LAPB_DEBUG 0 +#define lapb_dbg(level, fmt, ...) \ +do { \ + if (level < LAPB_DEBUG) \ + pr_debug(fmt, ##__VA_ARGS__); \ +} while (0) + #endif diff --git a/include/net/ieee80211_crypt.h b/include/net/lib80211.h index b3d65e0bedd..be95b926280 100644 --- a/include/net/ieee80211_crypt.h +++ b/include/net/lib80211.h @@ -1,4 +1,11 @@ /* + * lib80211.h -- common bits for IEEE802.11 wireless drivers + * + * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com> + * + * Some bits copied from old ieee80211 component, w/ original copyright + * notices below: + * * Original code based on Host AP (software wireless LAN access point) driver * for Intersil Prism2/2.5/3. * @@ -11,31 +18,34 @@ * * Copyright (c) 2004, Intel Corporation * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. See README and COPYING for - * more details. */ -/* - * This file defines the interface to the ieee80211 crypto module. - */ -#ifndef IEEE80211_CRYPT_H -#define IEEE80211_CRYPT_H +#ifndef LIB80211_H +#define LIB80211_H #include <linux/types.h> #include <linux/list.h> -#include <net/ieee80211.h> -#include <asm/atomic.h> +#include <linux/atomic.h> +#include <linux/if.h> +#include <linux/skbuff.h> +#include <linux/ieee80211.h> +#include <linux/timer.h> +#include <linux/seq_file.h> + +/* print_ssid() is intended to be used in debug (and possibly error) + * messages. It should never be used for passing ssid to user space. */ +const char *print_ssid(char *buf, const char *ssid, u8 ssid_len); +#define DECLARE_SSID_BUF(var) char var[IEEE80211_MAX_SSID_LEN * 4 + 1] __maybe_unused + +#define NUM_WEP_KEYS 4 enum { IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0), }; -struct sk_buff; struct module; -struct ieee80211_crypto_ops { +struct lib80211_crypto_ops { const char *name; struct list_head list; @@ -47,9 +57,6 @@ struct ieee80211_crypto_ops { /* deinitialize crypto context and free allocated private data */ void (*deinit) (void *priv); - int (*build_iv) (struct sk_buff * skb, int hdr_len, - u8 *key, int keylen, void *priv); - /* encrypt/decrypt return < 0 on error or >= 0 on success. The return * value from decrypt_mpdu is passed as the keyidx value for * decrypt_msdu. 
skb must have enough head and tail room for the @@ -70,7 +77,7 @@ struct ieee80211_crypto_ops { /* procfs handler for printing out key information and possible * statistics */ - char *(*print_stats) (char *p, void *priv); + void (*print_stats) (struct seq_file *m, void *priv); /* Crypto specific flag get/set for configuration settings */ unsigned long (*get_flags) (void *priv); @@ -87,22 +94,33 @@ struct ieee80211_crypto_ops { struct module *owner; }; -struct ieee80211_crypt_data { +struct lib80211_crypt_data { struct list_head list; /* delayed deletion list */ - struct ieee80211_crypto_ops *ops; + struct lib80211_crypto_ops *ops; void *priv; atomic_t refcnt; }; -struct ieee80211_device; +struct lib80211_crypt_info { + char *name; + /* Most clients will already have a lock, + so just point to that. */ + spinlock_t *lock; + + struct lib80211_crypt_data *crypt[NUM_WEP_KEYS]; + int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */ + struct list_head crypt_deinit_list; + struct timer_list crypt_deinit_timer; + int crypt_quiesced; +}; -int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops); -int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops); -struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name); -void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int); -void ieee80211_crypt_deinit_handler(unsigned long); -void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, - struct ieee80211_crypt_data **crypt); -void ieee80211_crypt_quiescing(struct ieee80211_device *ieee); +int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, + spinlock_t *lock); +void lib80211_crypt_info_free(struct lib80211_crypt_info *info); +int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops); +int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops); +struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name); +void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, + struct lib80211_crypt_data **crypt); -#endif +#endif /* LIB80211_H */ diff --git a/include/net/llc.h b/include/net/llc.h index 7940da1606e..e8e61d4fb45 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -16,8 +16,11 @@ #include <linux/if_ether.h> #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/rculist_nulls.h> +#include <linux/hash.h> +#include <linux/jhash.h> -#include <asm/atomic.h> +#include <linux/atomic.h> struct net_device; struct packet_type; @@ -31,6 +34,12 @@ struct llc_addr { #define LLC_SAP_STATE_INACTIVE 1 #define LLC_SAP_STATE_ACTIVE 2 +#define LLC_SK_DEV_HASH_BITS 6 +#define LLC_SK_DEV_HASH_ENTRIES (1<<LLC_SK_DEV_HASH_BITS) + +#define LLC_SK_LADDR_HASH_BITS 6 +#define LLC_SK_LADDR_HASH_ENTRIES (1<<LLC_SK_LADDR_HASH_BITS) + /** * struct llc_sap - Defines the SAP component * @@ -53,42 +62,61 @@ struct llc_sap { struct net_device *orig_dev); struct llc_addr laddr; struct list_head node; - struct { - rwlock_t lock; - struct hlist_head list; - } sk_list; + spinlock_t sk_lock; + int sk_count; + struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES]; + struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES]; }; +static inline +struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex) +{ + return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES]; +} + +static inline +u32 llc_sk_laddr_hashfn(struct llc_sap *sap, const struct llc_addr *laddr) +{ + return hash_32(jhash(laddr->mac, sizeof(laddr->mac), 0), + LLC_SK_LADDR_HASH_BITS); +} + +static inline +struct 
hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap, + const struct llc_addr *laddr) +{ + return &sap->sk_laddr_hash[llc_sk_laddr_hashfn(sap, laddr)]; +} + #define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */ #define LLC_DEST_SAP 1 /* Type 1 goes here */ #define LLC_DEST_CONN 2 /* Type 2 goes here */ extern struct list_head llc_sap_list; -extern rwlock_t llc_sap_list_lock; -extern int llc_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev); +int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, + struct net_device *orig_dev); -extern int llc_mac_hdr_init(struct sk_buff *skb, - const unsigned char *sa, const unsigned char *da); +int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa, + const unsigned char *da); -extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, - struct sk_buff *skb)); -extern void llc_remove_pack(int type); +void llc_add_pack(int type, + void (*handler)(struct llc_sap *sap, struct sk_buff *skb)); +void llc_remove_pack(int type); -extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb)); +void llc_set_station_handler(void (*handler)(struct sk_buff *skb)); -extern struct llc_sap *llc_sap_open(unsigned char lsap, - int (*rcv)(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev)); +struct llc_sap *llc_sap_open(unsigned char lsap, + int (*rcv)(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev)); static inline void llc_sap_hold(struct llc_sap *sap) { atomic_inc(&sap->refcnt); } -extern void llc_sap_close(struct llc_sap *sap); +void llc_sap_close(struct llc_sap *sap); static inline void llc_sap_put(struct llc_sap *sap) { @@ -96,33 +124,32 @@ static inline void llc_sap_put(struct llc_sap *sap) llc_sap_close(sap); } -extern struct llc_sap *llc_sap_find(unsigned char sap_value); +struct llc_sap *llc_sap_find(unsigned char sap_value); -extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, - unsigned char *dmac, unsigned char dsap); +int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, + unsigned char *dmac, unsigned char dsap); -extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); -extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); +void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); +void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_station_init(void); -extern void llc_station_exit(void); +void llc_station_init(void); +void llc_station_exit(void); #ifdef CONFIG_PROC_FS -extern int llc_proc_init(void); -extern void llc_proc_exit(void); +int llc_proc_init(void); +void llc_proc_exit(void); #else #define llc_proc_init() (0) #define llc_proc_exit() do { } while(0) #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SYSCTL -extern int llc_sysctl_init(void); -extern void llc_sysctl_exit(void); +int llc_sysctl_init(void); +void llc_sysctl_exit(void); extern int sysctl_llc2_ack_timeout; extern int sysctl_llc2_busy_timeout; extern int sysctl_llc2_p_timeout; extern int sysctl_llc2_rej_timeout; -extern int sysctl_llc_station_ack_timeout; #else #define llc_sysctl_init() (0) #define llc_sysctl_exit() do { } while(0) diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h index df83f69d2de..f3be818e73c 100644 --- a/include/net/llc_c_ac.h +++ b/include/net/llc_c_ac.h @@ -89,114 +89,92 @@ typedef int (*llc_conn_action_t)(struct sock *sk, struct 
sk_buff *skb); -extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk, +int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk, +int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, 
struct sk_buff *skb); +int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk, - struct sk_buff *skb); -extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb); -extern u8 llc_circular_between(u8 a, u8 b, u8 c); -extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff 
*skb); -extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb); -extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb); +int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb); +int 
llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb); +int llc_conn_disc(struct sock *sk, struct sk_buff *skb); +int llc_conn_reset(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb); +u8 llc_circular_between(u8 a, u8 b, u8 c); +int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb); +int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb); -extern void llc_conn_busy_tmr_cb(unsigned long timeout_data); -extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data); -extern void llc_conn_ack_tmr_cb(unsigned long timeout_data); -extern void llc_conn_rej_tmr_cb(unsigned long timeout_data); +void llc_conn_busy_tmr_cb(unsigned long timeout_data); +void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data); +void llc_conn_ack_tmr_cb(unsigned long timeout_data); +void llc_conn_rej_tmr_cb(unsigned long timeout_data); -extern void llc_conn_set_p_flag(struct sock *sk, u8 value); +void llc_conn_set_p_flag(struct sock *sk, u8 value); #endif /* LLC_C_AC_H */ diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h index 23a409381fa..3948cf111dd 100644 --- a/include/net/llc_c_ev.h +++ b/include/net/llc_c_ev.h @@ -128,142 +128,97 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb) typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb); typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_local_busy_detected(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, +int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, struct sk_buff 
*skb); -extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, +int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb); /* NOT_USED functions and their variations */ -extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, - struct sk_buff *skb); 
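The long runs of prototype hunks in llc_c_ac.h and llc_c_ev.h (and the similar ones in lapb.h above and in llc_conn.h, llc_if.h, llc_s_ac.h and llc_s_ev.h further down) are a mechanical cleanup rather than a functional change: the redundant "extern" keyword is dropped from function declarations, since functions have external linkage by default in C, and the previously wrapped parameter lists are re-flowed. Reduced to a single prototype, the pattern is:

    /* before: "extern" adds nothing to a function declaration in a header */
    extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);

    /* after: identical linkage and semantics, shorter declaration */
    int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
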
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, + struct sk_buff *skb); +int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb); /* Available connection action qualifiers */ -extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb); -extern int 
llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, +int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk, struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, - struct sk_buff *skb); -extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, - struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb); +int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb); static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb) { return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < - (unsigned)sk->sk_rcvbuf; + (unsigned 
int)sk->sk_rcvbuf; } #endif /* LLC_C_EV_H */ diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index e2374e34989..0134681acc4 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -76,6 +76,8 @@ struct llc_sock { u32 rx_pdu_hdr; /* used for saving header of last pdu received and caused sending FRMR. Used for resending FRMR */ + u32 cmsg_flags; + struct hlist_node dev_hash_node; }; static inline struct llc_sock *llc_sk(const struct sock *sk) @@ -93,28 +95,24 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) return skb->cb[sizeof(skb->cb) - 1]; } -extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, - struct proto *prot); -extern void llc_sk_free(struct sock *sk); +struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, + struct proto *prot); +void llc_sk_free(struct sock *sk); -extern void llc_sk_reset(struct sock *sk); +void llc_sk_reset(struct sock *sk); /* Access to a connection */ -extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); -extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); -extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); -extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, - u8 first_p_bit); -extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, - u8 first_f_bit); -extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, - u16 *how_many_unacked); -extern struct sock *llc_lookup_established(struct llc_sap *sap, - struct llc_addr *daddr, - struct llc_addr *laddr); -extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk); -extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk); +int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); +void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); +void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); +void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); +void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); +int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked); +struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr, + struct llc_addr *laddr); +void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk); +void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk); -extern u8 llc_data_accept_state(u8 state); -extern void llc_build_offset_table(void); +u8 llc_data_accept_state(u8 state); +void llc_build_offset_table(void); #endif /* LLC_CONN_H */ diff --git a/include/net/llc_if.h b/include/net/llc_if.h index c608812a8e8..8d5c543cd62 100644 --- a/include/net/llc_if.h +++ b/include/net/llc_if.h @@ -62,43 +62,7 @@ #define LLC_STATUS_CONFLICT 7 /* disconnect conn */ #define LLC_STATUS_RESET_DONE 8 /* */ -/** - * llc_mac_null - determines if a address is a null mac address - * @mac: Mac address to test if null. - * - * Determines if a given address is a null mac address. Returns 0 if the - * address is not a null mac, 1 if the address is a null mac. - */ -static inline int llc_mac_null(const u8 *mac) -{ - return is_zero_ether_addr(mac); -} - -static inline int llc_addrany(const struct llc_addr *addr) -{ - return llc_mac_null(addr->mac) && !addr->lsap; -} - -static inline int llc_mac_multicast(const u8 *mac) -{ - return is_multicast_ether_addr(mac); -} -/** - * llc_mac_match - determines if two mac addresses are the same - * @mac1: First mac address to compare. - * @mac2: Second mac address to compare. 
- * - * Determines if two given mac address are the same. Returns 0 if there - * is not a complete match up to len, 1 if a complete match up to len is - * found. - */ -static inline int llc_mac_match(const u8 *mac1, const u8 *mac2) -{ - return !compare_ether_addr(mac1, mac2); -} - -extern int llc_establish_connection(struct sock *sk, u8 *lmac, - u8 *dmac, u8 dsap); -extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb); -extern int llc_send_disc(struct sock *sk); +int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap); +int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb); +int llc_send_disc(struct sock *sk); #endif /* LLC_IF_H */ diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 75b8e2968c9..c0f0a13ed81 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -13,7 +13,6 @@ */ #include <linux/if_ether.h> -#include <linux/if_tr.h> /* Lengths of frame formats */ #define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ @@ -143,7 +142,7 @@ #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) -#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) +#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO) /* FRMR information field macros */ @@ -199,7 +198,7 @@ struct llc_pdu_sn { u8 ssap; u8 ctrl_1; u8 ctrl_2; -}; +} __packed; static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) { @@ -211,7 +210,7 @@ struct llc_pdu_un { u8 dsap; u8 ssap; u8 ctrl_1; -}; +} __packed; static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) { @@ -253,10 +252,6 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) { if (skb->protocol == htons(ETH_P_802_2)) memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); - else if (skb->protocol == htons(ETH_P_TR_802_2)) { - memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN); - *sa &= 0x7F; - } } /** @@ -270,8 +265,6 @@ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) { if (skb->protocol == htons(ETH_P_802_2)) memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); - else if (skb->protocol == htons(ETH_P_TR_802_2)) - memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN); } /** @@ -359,7 +352,7 @@ struct llc_xid_info { u8 fmt_id; /* always 0x81 for LLC */ u8 type; /* different if NULL/non-NULL LSAP */ u8 rw; /* sender receive window */ -}; +} __packed; /** * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID @@ -415,23 +408,22 @@ struct llc_frmr_info { u8 curr_ssv; /* current send state variable val */ u8 curr_rsv; /* current receive state variable */ u8 ind_bits; /* indicator bits set with macro */ -}; - -extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); -extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); -extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit); -extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit); -extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr); -extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); -extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); -extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); -extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit); -extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit); -extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, - struct llc_pdu_sn *prev_pdu, - u8 f_bit, u8 vs, u8 vr, u8 vzyxw); -extern void llc_pdu_init_as_rr_rsp(struct sk_buff 
*skb, u8 f_bit, u8 nr); -extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); -extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); -extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit); +} __packed; + +void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); +void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); +void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit); +void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit); +void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr); +void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); +void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); +void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr); +void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit); +void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit); +void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu, + u8 f_bit, u8 vs, u8 vr, u8 vzyxw); +void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); +void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); +void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr); +void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit); #endif /* LLC_PDU_H */ diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h index 37a3bbd0239..a61b98c108e 100644 --- a/include/net/llc_s_ac.h +++ b/include/net/llc_s_ac.h @@ -25,15 +25,13 @@ /* All action functions must look like this */ typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_unitdata_ind(struct llc_sap *sap, - struct sk_buff *skb); -extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_report_status(struct llc_sap *sap, - struct sk_buff *skb); -extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb); #endif /* LLC_S_AC_H */ diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h index e3acb9329e4..84db3a59ed2 100644 --- a/include/net/llc_s_ev.h +++ b/include/net/llc_s_ev.h @@ -53,15 +53,14 @@ struct llc_sap; typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb); -extern int 
llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_sap_ev_deactivation_req(struct llc_sap *sap, - struct sk_buff *skb); +int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb); +int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb); #endif /* LLC_S_EV_H */ diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h index ed25bec2f64..1e4df9fd9fb 100644 --- a/include/net/llc_sap.h +++ b/include/net/llc_sap.h @@ -19,18 +19,14 @@ struct net_device; struct sk_buff; struct sock; -extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb); -extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb, - unsigned char prim); -extern struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev, - u8 type, u32 data_size); +void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb); +void llc_save_primitive(struct sock *sk, struct sk_buff *skb, + unsigned char prim); +struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev, + u8 type, u32 data_size); -extern void llc_build_and_send_test_pkt(struct llc_sap *sap, - struct sk_buff *skb, - unsigned char *dmac, - unsigned char dsap); -extern void llc_build_and_send_xid_pkt(struct llc_sap *sap, - struct sk_buff *skb, - unsigned char *dmac, - unsigned char dsap); +void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb, + unsigned char *dmac, unsigned char dsap); +void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb, + unsigned char *dmac, unsigned char dsap); #endif /* LLC_SAP_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 9083bafb63c..421b6ecb4b2 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3,7 +3,7 @@ * * Copyright 2002-2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> - * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -13,14 +13,13 @@ #ifndef MAC80211_H #define MAC80211_H +#include <linux/bug.h> #include <linux/kernel.h> #include <linux/if_ether.h> #include <linux/skbuff.h> -#include <linux/wireless.h> -#include <linux/device.h> #include <linux/ieee80211.h> -#include <net/wireless.h> #include <net/cfg80211.h> +#include <asm/unaligned.h> /** * DOC: Introduction @@ -38,7 +37,11 @@ * called in hardware interrupt context. The low-level driver must not call any * other functions in hardware interrupt context. 
If there is a need for such * call, the low-level driver should first ACK the interrupt and perform the - * IEEE 802.11 code call after this, e.g. from a scheduled workqueue function. + * IEEE 802.11 code call after this, e.g. from a scheduled workqueue or even + * tasklet function. + * + * NOTE: If the driver opts to use the _irqsafe() functions, it may not also + * use the non-IRQ-safe functions! */ /** @@ -63,216 +66,163 @@ * * Secondly, when the hardware handles fragmentation, the frame handed to * the driver from mac80211 is the MSDU, not the MPDU. - * - * Finally, for received frames, the driver is able to indicate that it has - * filled a radiotap header and put that in front of the frame; if it does - * not do so then mac80211 may add this under certain circumstances. - */ - -#define IEEE80211_CHAN_W_SCAN 0x00000001 -#define IEEE80211_CHAN_W_ACTIVE_SCAN 0x00000002 -#define IEEE80211_CHAN_W_IBSS 0x00000004 - -/* Channel information structure. Low-level driver is expected to fill in chan, - * freq, and val fields. Other fields will be filled in by 80211.o based on - * hostapd information and low-level driver does not need to use them. The - * limits for each channel will be provided in 'struct ieee80211_conf' when - * configuring the low-level driver with hw->config callback. If a device has - * a default regulatory domain, IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED - * can be set to let the driver configure all fields */ -struct ieee80211_channel { - short chan; /* channel number (IEEE 802.11) */ - short freq; /* frequency in MHz */ - int val; /* hw specific value for the channel */ - int flag; /* flag for hostapd use (IEEE80211_CHAN_*) */ - unsigned char power_level; - unsigned char antenna_max; -}; - -#define IEEE80211_RATE_ERP 0x00000001 -#define IEEE80211_RATE_BASIC 0x00000002 -#define IEEE80211_RATE_PREAMBLE2 0x00000004 -#define IEEE80211_RATE_SUPPORTED 0x00000010 -#define IEEE80211_RATE_OFDM 0x00000020 -#define IEEE80211_RATE_CCK 0x00000040 -#define IEEE80211_RATE_MANDATORY 0x00000100 - -#define IEEE80211_RATE_CCK_2 (IEEE80211_RATE_CCK | IEEE80211_RATE_PREAMBLE2) -#define IEEE80211_RATE_MODULATION(f) \ - (f & (IEEE80211_RATE_CCK | IEEE80211_RATE_OFDM)) - -/* Low-level driver should set PREAMBLE2, OFDM and CCK flags. - * BASIC, SUPPORTED, ERP, and MANDATORY flags are set in 80211.o based on the - * configuration. 
*/ -struct ieee80211_rate { - int rate; /* rate in 100 kbps */ - int val; /* hw specific value for the rate */ - int flags; /* IEEE80211_RATE_ flags */ - int val2; /* hw specific value for the rate when using short preamble - * (only when IEEE80211_RATE_PREAMBLE2 flag is set, i.e., for - * 2, 5.5, and 11 Mbps) */ - signed char min_rssi_ack; - unsigned char min_rssi_ack_delta; - - /* following fields are set by 80211.o and need not be filled by the - * low-level driver */ - int rate_inv; /* inverse of the rate (LCM(all rates) / rate) for - * optimizing channel utilization estimates */ -}; - -/** - * enum ieee80211_phymode - PHY modes - * - * @MODE_IEEE80211A: 5GHz as defined by 802.11a/802.11h - * @MODE_IEEE80211B: 2.4 GHz as defined by 802.11b - * @MODE_IEEE80211G: 2.4 GHz as defined by 802.11g (with OFDM), - * backwards compatible with 11b mode - * @NUM_IEEE80211_MODES: internal */ -enum ieee80211_phymode { - MODE_IEEE80211A, - MODE_IEEE80211B, - MODE_IEEE80211G, - - /* keep last */ - NUM_IEEE80211_MODES -}; /** - * struct ieee80211_ht_info - describing STA's HT capabilities + * DOC: mac80211 workqueue + * + * mac80211 provides its own workqueue for drivers and internal mac80211 use. + * The workqueue is a single threaded workqueue and can only be accessed by + * helpers for sanity checking. Drivers must ensure all work added onto the + * mac80211 workqueue should be cancelled on the driver stop() callback. + * + * mac80211 will flushed the workqueue upon interface removal and during + * suspend. * - * This structure describes most essential parameters needed - * to describe 802.11n HT capabilities for an STA. + * All work performed on the mac80211 workqueue must not acquire the RTNL lock. * - * @ht_supported: is HT supported by STA, 0: no, 1: yes - * @cap: HT capabilities map as described in 802.11n spec - * @ampdu_factor: Maximum A-MPDU length factor - * @ampdu_density: Minimum A-MPDU spacing - * @supp_mcs_set: Supported MCS set as described in 802.11n spec */ -struct ieee80211_ht_info { - u8 ht_supported; - u16 cap; /* use IEEE80211_HT_CAP_ */ - u8 ampdu_factor; - u8 ampdu_density; - u8 supp_mcs_set[16]; -}; + +struct device; /** - * struct ieee80211_ht_bss_info - describing BSS's HT characteristics - * - * This structure describes most essential parameters needed - * to describe 802.11n HT characteristics in a BSS + * enum ieee80211_max_queues - maximum number of queues * - * @primary_channel: channel number of primery channel - * @bss_cap: 802.11n's general BSS capabilities (e.g. channel width) - * @bss_op_mode: 802.11n's BSS operation modes (e.g. HT protection) + * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. + * @IEEE80211_MAX_QUEUE_MAP: bitmap with maximum queues set */ -struct ieee80211_ht_bss_info { - u8 primary_channel; - u8 bss_cap; /* use IEEE80211_HT_IE_CHA_ */ - u8 bss_op_mode; /* use IEEE80211_HT_IE_ */ +enum ieee80211_max_queues { + IEEE80211_MAX_QUEUES = 16, + IEEE80211_MAX_QUEUE_MAP = BIT(IEEE80211_MAX_QUEUES) - 1, }; +#define IEEE80211_INVAL_HW_QUEUE 0xff + /** - * struct ieee80211_hw_mode - PHY mode definition - * - * This structure describes the capabilities supported by the device - * in a single PHY mode. 
- * - * @list: internal - * @channels: pointer to array of supported channels - * @rates: pointer to array of supported bitrates - * @mode: the PHY mode for this definition - * @num_channels: number of supported channels - * @num_rates: number of supported bitrates - * @ht_info: PHY's 802.11n HT abilities for this mode + * enum ieee80211_ac_numbers - AC numbers as used in mac80211 + * @IEEE80211_AC_VO: voice + * @IEEE80211_AC_VI: video + * @IEEE80211_AC_BE: best effort + * @IEEE80211_AC_BK: background */ -struct ieee80211_hw_mode { - struct list_head list; - struct ieee80211_channel *channels; - struct ieee80211_rate *rates; - enum ieee80211_phymode mode; - int num_channels; - int num_rates; - struct ieee80211_ht_info ht_info; +enum ieee80211_ac_numbers { + IEEE80211_AC_VO = 0, + IEEE80211_AC_VI = 1, + IEEE80211_AC_BE = 2, + IEEE80211_AC_BK = 3, }; +#define IEEE80211_NUM_ACS 4 /** * struct ieee80211_tx_queue_params - transmit queue configuration * * The information provided in this structure is required for QoS - * transmit queue configuration. + * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29. * - * @aifs: arbitration interface space [0..255, -1: use default] - * @cw_min: minimum contention window [will be a value of the form - * 2^n-1 in the range 1..1023; 0: use default] + * @aifs: arbitration interframe space [0..255] + * @cw_min: minimum contention window [a value of the form + * 2^n-1 in the range 1..32767] * @cw_max: maximum contention window [like @cw_min] - * @burst_time: maximum burst time in units of 0.1ms, 0 meaning disabled + * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled + * @acm: is mandatory admission control required for the access category + * @uapsd: is U-APSD mode enabled for the queue */ struct ieee80211_tx_queue_params { - int aifs; - int cw_min; - int cw_max; - int burst_time; + u16 txop; + u16 cw_min; + u16 cw_max; + u8 aifs; + bool acm; + bool uapsd; +}; + +struct ieee80211_low_level_stats { + unsigned int dot11ACKFailureCount; + unsigned int dot11RTSFailureCount; + unsigned int dot11FCSErrorCount; + unsigned int dot11RTSSuccessCount; }; /** - * struct ieee80211_tx_queue_stats_data - transmit queue statistics - * - * @len: number of packets in queue - * @limit: queue length limit - * @count: number of frames sent + * enum ieee80211_chanctx_change - change flag for channel context + * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed + * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed + * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed + * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel, + * this is used only with channel switching with CSA + * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed */ -struct ieee80211_tx_queue_stats_data { - unsigned int len; - unsigned int limit; - unsigned int count; +enum ieee80211_chanctx_change { + IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0), + IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1), + IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2), + IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3), + IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4), }; /** - * enum ieee80211_tx_queue - transmit queue number - * - * These constants are used with some callbacks that take a - * queue number to set parameters for a queue. 
- * - * @IEEE80211_TX_QUEUE_DATA0: data queue 0 - * @IEEE80211_TX_QUEUE_DATA1: data queue 1 - * @IEEE80211_TX_QUEUE_DATA2: data queue 2 - * @IEEE80211_TX_QUEUE_DATA3: data queue 3 - * @IEEE80211_TX_QUEUE_DATA4: data queue 4 - * @IEEE80211_TX_QUEUE_SVP: ?? - * @NUM_TX_DATA_QUEUES: number of data queues - * @IEEE80211_TX_QUEUE_AFTER_BEACON: transmit queue for frames to be - * sent after a beacon - * @IEEE80211_TX_QUEUE_BEACON: transmit queue for beacon frames - */ -enum ieee80211_tx_queue { - IEEE80211_TX_QUEUE_DATA0, - IEEE80211_TX_QUEUE_DATA1, - IEEE80211_TX_QUEUE_DATA2, - IEEE80211_TX_QUEUE_DATA3, - IEEE80211_TX_QUEUE_DATA4, - IEEE80211_TX_QUEUE_SVP, - - NUM_TX_DATA_QUEUES, - -/* due to stupidity in the sub-ioctl userspace interface, the items in - * this struct need to have fixed values. As soon as it is removed, we can - * fix these entries. */ - IEEE80211_TX_QUEUE_AFTER_BEACON = 6, - IEEE80211_TX_QUEUE_BEACON = 7 + * struct ieee80211_chanctx_conf - channel context that vifs may be tuned to + * + * This is the driver-visible part. The ieee80211_chanctx + * that contains it is visible in mac80211 only. + * + * @def: the channel definition + * @min_def: the minimum channel definition currently required. + * @rx_chains_static: The number of RX chains that must always be + * active on the channel to receive MIMO transmissions + * @rx_chains_dynamic: The number of RX chains that must be enabled + * after RTS/CTS handshake to receive SMPS MIMO transmissions; + * this will always be >= @rx_chains_static. + * @radar_enabled: whether radar detection is enabled on this channel. + * @drv_priv: data area for driver use, will always be aligned to + * sizeof(void *), size is determined in hw information. + */ +struct ieee80211_chanctx_conf { + struct cfg80211_chan_def def; + struct cfg80211_chan_def min_def; + + u8 rx_chains_static, rx_chains_dynamic; + + bool radar_enabled; + + u8 drv_priv[0] __aligned(sizeof(void *)); }; -struct ieee80211_tx_queue_stats { - struct ieee80211_tx_queue_stats_data data[NUM_TX_DATA_QUEUES]; +/** + * enum ieee80211_chanctx_switch_mode - channel context switch mode + * @CHANCTX_SWMODE_REASSIGN_VIF: Both old and new contexts already + * exist (and will continue to exist), but the virtual interface + * needs to be switched from one to the other. + * @CHANCTX_SWMODE_SWAP_CONTEXTS: The old context exists but will stop + * to exist with this call, the new context doesn't exist but + * will be active after this call, the virtual interface switches + * from the old to the new (note that the driver may of course + * implement this as an on-the-fly chandef switch of the existing + * hardware context, but the mac80211 pointer for the old context + * will cease to exist and only the new one will later be used + * for changes/removal.) + */ +enum ieee80211_chanctx_switch_mode { + CHANCTX_SWMODE_REASSIGN_VIF, + CHANCTX_SWMODE_SWAP_CONTEXTS, }; -struct ieee80211_low_level_stats { - unsigned int dot11ACKFailureCount; - unsigned int dot11RTSFailureCount; - unsigned int dot11FCSErrorCount; - unsigned int dot11RTSSuccessCount; +/** + * struct ieee80211_vif_chanctx_switch - vif chanctx switch information + * + * This is structure is used to pass information about a vif that + * needs to switch from one chanctx to another. The + * &ieee80211_chanctx_switch_mode defines how the switch should be + * done. 
+ * + * @vif: the vif that should be switched from old_ctx to new_ctx + * @old_ctx: the old context to which the vif was assigned + * @new_ctx: the new context to which the vif must be assigned + */ +struct ieee80211_vif_chanctx_switch { + struct ieee80211_vif *vif; + struct ieee80211_chanctx_conf *old_ctx; + struct ieee80211_chanctx_conf *new_ctx; }; /** @@ -285,11 +235,77 @@ struct ieee80211_low_level_stats { * also implies a change in the AID. * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed * @BSS_CHANGED_ERP_PREAMBLE: preamble changed + * @BSS_CHANGED_ERP_SLOT: slot timing changed + * @BSS_CHANGED_HT: 802.11n parameters changed + * @BSS_CHANGED_BASIC_RATES: Basic rateset changed + * @BSS_CHANGED_BEACON_INT: Beacon interval changed + * @BSS_CHANGED_BSSID: BSSID changed, for whatever + * reason (IBSS and managed mode) + * @BSS_CHANGED_BEACON: Beacon data changed, retrieve + * new beacon (beaconing modes) + * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be + * enabled/disabled (beaconing modes) + * @BSS_CHANGED_CQM: Connection quality monitor config changed + * @BSS_CHANGED_IBSS: IBSS join status changed + * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. + * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note + * that it is only ever disabled for station mode. + * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. + * @BSS_CHANGED_SSID: SSID changed for this BSS (AP and IBSS mode) + * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode) + * @BSS_CHANGED_PS: PS changed for this BSS (STA mode) + * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface + * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) + * changed (currently only in P2P client mode, GO mode will be later) + * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available: + * currently dtim_period only is under consideration. + * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, + * note that this is only called when it changes after the channel + * context had been assigned. */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, BSS_CHANGED_ERP_CTS_PROT = 1<<1, BSS_CHANGED_ERP_PREAMBLE = 1<<2, + BSS_CHANGED_ERP_SLOT = 1<<3, + BSS_CHANGED_HT = 1<<4, + BSS_CHANGED_BASIC_RATES = 1<<5, + BSS_CHANGED_BEACON_INT = 1<<6, + BSS_CHANGED_BSSID = 1<<7, + BSS_CHANGED_BEACON = 1<<8, + BSS_CHANGED_BEACON_ENABLED = 1<<9, + BSS_CHANGED_CQM = 1<<10, + BSS_CHANGED_IBSS = 1<<11, + BSS_CHANGED_ARP_FILTER = 1<<12, + BSS_CHANGED_QOS = 1<<13, + BSS_CHANGED_IDLE = 1<<14, + BSS_CHANGED_SSID = 1<<15, + BSS_CHANGED_AP_PROBE_RESP = 1<<16, + BSS_CHANGED_PS = 1<<17, + BSS_CHANGED_TXPOWER = 1<<18, + BSS_CHANGED_P2P_PS = 1<<19, + BSS_CHANGED_BEACON_INFO = 1<<20, + BSS_CHANGED_BANDWIDTH = 1<<21, + + /* when adding here, make sure to change ieee80211_reconfig */ +}; + +/* + * The maximum number of IPv4 addresses listed for ARP filtering. If the number + * of addresses for an interface increase beyond this value, hardware ARP + * filtering will be disabled. + */ +#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4 + +/** + * enum ieee80211_rssi_event - RSSI threshold event + * An indicator for when RSSI goes below/above a certain threshold. + * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver. + * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver. 
+ */ +enum ieee80211_rssi_event { + RSSI_EVENT_HIGH, + RSSI_EVENT_LOW, }; /** @@ -299,80 +315,505 @@ enum ieee80211_bss_change { * to that BSS) that can change during the lifetime of the BSS. * * @assoc: association status + * @ibss_joined: indicates whether this station is part of an IBSS + * or not + * @ibss_creator: indicates if a new IBSS network is being created * @aid: association ID number, valid only when @assoc is true * @use_cts_prot: use CTS protection - * @use_short_preamble: use 802.11b short preamble + * @use_short_preamble: use 802.11b short preamble; + * if the hardware cannot handle this it must set the + * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag + * @use_short_slot: use short slot time (only relevant for ERP); + * if the hardware cannot handle this it must set the + * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag + * @dtim_period: num of beacons before the next DTIM, for beaconing, + * valid in station mode only if after the driver was notified + * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then. + * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old + * as it may have been received during scanning long ago). If the + * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can + * only come from a beacon, but might not become valid until after + * association when a beacon is received (which is notified with the + * %BSS_CHANGED_DTIM flag.) + * @sync_device_ts: the device timestamp corresponding to the sync_tsf, + * the driver/device can use this to calculate synchronisation + * (see @sync_tsf) + * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY + * is requested, see @sync_tsf/@sync_device_ts. + * @beacon_int: beacon interval + * @assoc_capability: capabilities taken from assoc resp + * @basic_rates: bitmap of basic rates, each bit stands for an + * index into the rate table configured by the driver in + * the current band. + * @beacon_rate: associated AP's beacon TX rate + * @mcast_rate: per-band multicast rate index + 1 (0: disabled) + * @bssid: The BSSID for this BSS + * @enable_beacon: whether beaconing should be enabled or not + * @chandef: Channel definition for this BSS -- the hardware might be + * configured a higher bandwidth than this BSS uses, for example. + * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. + * This field is only valid when the channel type is one of the HT types. + * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value + * implies disabled + * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis + * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The + * may filter ARP queries targeted for other addresses than listed here. + * The driver must allow ARP queries targeted for all address listed here + * to pass through. An empty list implies no ARP queries need to pass. + * @arp_addr_cnt: Number of addresses currently on the list. Note that this + * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list + * array size), it's up to the driver what to do in that case. + * @qos: This is a QoS-enabled BSS. + * @idle: This interface is idle. There's also a global idle flag in the + * hardware config which may be more appropriate depending on what + * your driver/device needs to do. + * @ps: power-save mode (STA only). This flag is NOT affected by + * offchannel/dynamic_ps operations. + * @ssid: The SSID of the current vif. Valid in AP and IBSS mode. 
+ * @ssid_len: Length of SSID given in @ssid. + * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode. + * @txpower: TX power in dBm + * @p2p_noa_attr: P2P NoA attribute for P2P powersave */ struct ieee80211_bss_conf { + const u8 *bssid; /* association related data */ - bool assoc; + bool assoc, ibss_joined; + bool ibss_creator; u16 aid; /* erp related data */ bool use_cts_prot; bool use_short_preamble; + bool use_short_slot; + bool enable_beacon; + u8 dtim_period; + u16 beacon_int; + u16 assoc_capability; + u64 sync_tsf; + u32 sync_device_ts; + u8 sync_dtim_count; + u32 basic_rates; + struct ieee80211_rate *beacon_rate; + int mcast_rate[IEEE80211_NUM_BANDS]; + u16 ht_operation_mode; + s32 cqm_rssi_thold; + u32 cqm_rssi_hyst; + struct cfg80211_chan_def chandef; + __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + int arp_addr_cnt; + bool qos; + bool idle; + bool ps; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + size_t ssid_len; + bool hidden_ssid; + int txpower; + struct ieee80211_p2p_noa_attr p2p_noa_attr; +}; + +/** + * enum mac80211_tx_info_flags - flags to describe transmission information/status + * + * These flags are used with the @flags member of &ieee80211_tx_info. + * + * @IEEE80211_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame. + * @IEEE80211_TX_CTL_ASSIGN_SEQ: The driver has to assign a sequence + * number to this frame, taking care of not overwriting the fragment + * number and increasing the sequence number only when the + * IEEE80211_TX_CTL_FIRST_FRAGMENT flag is set. mac80211 will properly + * assign sequence numbers to QoS-data frames but cannot do so correctly + * for non-QoS-data and management frames because beacons need them from + * that counter as well and mac80211 cannot guarantee proper sequencing. + * If this flag is set, the driver should instruct the hardware to + * assign a sequence number to the frame or assign one itself. Cf. IEEE + * 802.11-2007 7.1.3.4.1 paragraph 3. This flag will always be set for + * beacons and always be clear for frames without a sequence number field. + * @IEEE80211_TX_CTL_NO_ACK: tell the low level not to wait for an ack + * @IEEE80211_TX_CTL_CLEAR_PS_FILT: clear powersave filter for destination + * station + * @IEEE80211_TX_CTL_FIRST_FRAGMENT: this is a first fragment of the frame + * @IEEE80211_TX_CTL_SEND_AFTER_DTIM: send this frame after DTIM beacon + * @IEEE80211_TX_CTL_AMPDU: this frame should be sent as part of an A-MPDU + * @IEEE80211_TX_CTL_INJECTED: Frame was injected, internal to mac80211. + * @IEEE80211_TX_STAT_TX_FILTERED: The frame was not transmitted + * because the destination STA was in powersave mode. Note that to + * avoid race conditions, the filter must be set by the hardware or + * firmware upon receiving a frame that indicates that the station + * went to sleep (must be done on device to filter frames already on + * the queue) and may only be unset after mac80211 gives the OK for + * that by setting the IEEE80211_TX_CTL_CLEAR_PS_FILT (see above), + * since only then is it guaranteed that no more frames are in the + * hardware queue. + * @IEEE80211_TX_STAT_ACK: Frame was acknowledged + * @IEEE80211_TX_STAT_AMPDU: The frame was aggregated, so status + * is for the whole aggregation. + * @IEEE80211_TX_STAT_AMPDU_NO_BACK: no block ack was returned, + * so consider using block ack request (BAR). 
+ * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be + * set by rate control algorithms to indicate probe rate, will + * be cleared for fragmented frames (except on the last fragment) + * @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate + * that a frame can be transmitted while the queues are stopped for + * off-channel operation. + * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211, + * used to indicate that a pending frame requires TX processing before + * it can be sent out. + * @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211, + * used to indicate that a frame was already retried due to PS + * @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211, + * used to indicate frame should not be encrypted + * @IEEE80211_TX_CTL_NO_PS_BUFFER: This frame is a response to a poll + * frame (PS-Poll or uAPSD) or a non-bufferable MMPDU and must + * be sent although the station is in powersave mode. + * @IEEE80211_TX_CTL_MORE_FRAMES: More frames will be passed to the + * transmit function after the current frame, this can be used + * by drivers to kick the DMA queue only if unset or when the + * queue gets full. + * @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted + * after TX status because the destination was asleep, it must not + * be modified again (no seqno assignment, crypto, etc.) + * @IEEE80211_TX_INTFL_MLME_CONN_TX: This frame was transmitted by the MLME + * code for connection establishment, this indicates that its status + * should kick the MLME state machine. + * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211 + * MLME command (internal to mac80211 to figure out whether to send TX + * status to user space) + * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame + * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this + * frame and selects the maximum number of streams that it can use. + * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on + * the off-channel channel when a remain-on-channel offload is done + * in hardware -- normal packets still flow and are expected to be + * handled properly by the device. + * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP + * testing. It will be sent out with incorrect Michael MIC key to allow + * TKIP countermeasures to be tested. + * @IEEE80211_TX_CTL_NO_CCK_RATE: This frame will be sent at non CCK rate. + * This flag is actually used for management frame especially for P2P + * frames not being sent at CCK rate in 2GHz band. + * @IEEE80211_TX_STATUS_EOSP: This packet marks the end of service period, + * when its status is reported the service period ends. For frames in + * an SP that mac80211 transmits, it is already set; for driver frames + * the driver may set this flag. It is also used to do the same for + * PS-Poll responses. + * @IEEE80211_TX_CTL_USE_MINRATE: This frame will be sent at lowest rate. + * This flag is used to send nullfunc frame at minimum rate when + * the nullfunc is used for connection monitoring purpose. + * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it + * would be fragmented by size (this is optional, only used for + * monitor injection). + * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll + * frame (PS-Poll or uAPSD). + * + * Note: If you have to add new flags to the enumeration, then don't + * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. 
+ */ +enum mac80211_tx_info_flags { + IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), + IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1), + IEEE80211_TX_CTL_NO_ACK = BIT(2), + IEEE80211_TX_CTL_CLEAR_PS_FILT = BIT(3), + IEEE80211_TX_CTL_FIRST_FRAGMENT = BIT(4), + IEEE80211_TX_CTL_SEND_AFTER_DTIM = BIT(5), + IEEE80211_TX_CTL_AMPDU = BIT(6), + IEEE80211_TX_CTL_INJECTED = BIT(7), + IEEE80211_TX_STAT_TX_FILTERED = BIT(8), + IEEE80211_TX_STAT_ACK = BIT(9), + IEEE80211_TX_STAT_AMPDU = BIT(10), + IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), + IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), + IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13), + IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14), + IEEE80211_TX_INTFL_RETRIED = BIT(15), + IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16), + IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17), + IEEE80211_TX_CTL_MORE_FRAMES = BIT(18), + IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19), + IEEE80211_TX_INTFL_MLME_CONN_TX = BIT(20), + IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21), + IEEE80211_TX_CTL_LDPC = BIT(22), + IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24), + IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25), + IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26), + IEEE80211_TX_CTL_NO_CCK_RATE = BIT(27), + IEEE80211_TX_STATUS_EOSP = BIT(28), + IEEE80211_TX_CTL_USE_MINRATE = BIT(29), + IEEE80211_TX_CTL_DONTFRAG = BIT(30), + IEEE80211_TX_CTL_PS_RESPONSE = BIT(31), }; -/* Transmit control fields. This data structure is passed to low-level driver - * with each TX frame. The low-level driver is responsible for configuring - * the hardware to use given values (depending on what is supported). */ +#define IEEE80211_TX_CTL_STBC_SHIFT 23 -struct ieee80211_tx_control { - struct ieee80211_vif *vif; - int tx_rate; /* Transmit rate, given as the hw specific value for the - * rate (from struct ieee80211_rate) */ - int rts_cts_rate; /* Transmit rate for RTS/CTS frame, given as the hw - * specific value for the rate (from - * struct ieee80211_rate) */ - -#define IEEE80211_TXCTL_REQ_TX_STATUS (1<<0)/* request TX status callback for - * this frame */ -#define IEEE80211_TXCTL_DO_NOT_ENCRYPT (1<<1) /* send this frame without - * encryption; e.g., for EAPOL - * frames */ -#define IEEE80211_TXCTL_USE_RTS_CTS (1<<2) /* use RTS-CTS before sending - * frame */ -#define IEEE80211_TXCTL_USE_CTS_PROTECT (1<<3) /* use CTS protection for the - * frame (e.g., for combined - * 802.11g / 802.11b networks) */ -#define IEEE80211_TXCTL_NO_ACK (1<<4) /* tell the low level not to - * wait for an ack */ -#define IEEE80211_TXCTL_RATE_CTRL_PROBE (1<<5) -#define IEEE80211_TXCTL_CLEAR_DST_MASK (1<<6) -#define IEEE80211_TXCTL_REQUEUE (1<<7) -#define IEEE80211_TXCTL_FIRST_FRAGMENT (1<<8) /* this is a first fragment of - * the frame */ -#define IEEE80211_TXCTL_LONG_RETRY_LIMIT (1<<10) /* this frame should be send - * using the through - * set_retry_limit configured - * long retry value */ -#define IEEE80211_TXCTL_EAPOL_FRAME (1<<11) /* internal to mac80211 */ -#define IEEE80211_TXCTL_SEND_AFTER_DTIM (1<<12) /* send this frame after DTIM - * beacon */ - u32 flags; /* tx control flags defined - * above */ - u8 key_idx; /* keyidx from hw->set_key(), undefined if - * IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */ - u8 retry_limit; /* 1 = only first attempt, 2 = one retry, .. 
- * This could be used when set_retry_limit - * is not implemented by the driver */ - u8 power_level; /* per-packet transmit power level, in dBm */ - u8 antenna_sel_tx; /* 0 = default/diversity, 1 = Ant0, 2 = Ant1 */ - u8 icv_len; /* length of the ICV/MIC field in octets */ - u8 iv_len; /* length of the IV field in octets */ - u8 queue; /* hardware queue to use for this frame; - * 0 = highest, hw->queues-1 = lowest */ - struct ieee80211_rate *rate; /* internal 80211.o rate */ - struct ieee80211_rate *rts_rate; /* internal 80211.o rate - * for RTS/CTS */ - int alt_retry_rate; /* retry rate for the last retries, given as the - * hw specific value for the rate (from - * struct ieee80211_rate). To be used to limit - * packet dropping when probing higher rates, if hw - * supports multiple retry rates. -1 = not used */ - int type; /* internal */ +/** + * enum mac80211_tx_control_flags - flags to describe transmit control + * + * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control + * protocol frame (e.g. EAP) + * + * These flags are used in tx_info->control.flags. + */ +enum mac80211_tx_control_flags { + IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0), +}; + +/* + * This definition is used as a mask to clear all temporary flags, which are + * set by the tx handlers for each transmission attempt by the mac80211 stack. + */ +#define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK | \ + IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT | \ + IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \ + IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \ + IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \ + IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_NO_PS_BUFFER | \ + IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \ + IEEE80211_TX_CTL_STBC | IEEE80211_TX_STATUS_EOSP) + +/** + * enum mac80211_rate_control_flags - per-rate flags set by the + * Rate Control algorithm. + * + * These flags are set by the Rate control algorithm for each rate during tx, + * in the @flags member of struct ieee80211_tx_rate. + * + * @IEEE80211_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate. + * @IEEE80211_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required. + * This is set if the current BSS requires ERP protection. + * @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble. + * @IEEE80211_TX_RC_MCS: HT rate. + * @IEEE80211_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is split + * into a higher 4 bits (Nss) and lower 4 bits (MCS number) + * @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in + * Greenfield mode. + * @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz. + * @IEEE80211_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission + * @IEEE80211_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission + * (80+80 isn't supported yet) + * @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the + * adjacent 20 MHz channels, if the current channel type is + * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS. + * @IEEE80211_TX_RC_SHORT_GI: Short Guard interval should be used for this rate. 
+ */ +enum mac80211_rate_control_flags { + IEEE80211_TX_RC_USE_RTS_CTS = BIT(0), + IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1), + IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(2), + + /* rate index is an HT/VHT MCS instead of an index */ + IEEE80211_TX_RC_MCS = BIT(3), + IEEE80211_TX_RC_GREEN_FIELD = BIT(4), + IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(5), + IEEE80211_TX_RC_DUP_DATA = BIT(6), + IEEE80211_TX_RC_SHORT_GI = BIT(7), + IEEE80211_TX_RC_VHT_MCS = BIT(8), + IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(9), + IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(10), }; +/* there are 40 bytes if you don't need the rateset to be kept */ +#define IEEE80211_TX_INFO_DRIVER_DATA_SIZE 40 + +/* if you do need the rateset, then you have less space */ +#define IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE 24 + +/* maximum number of rate stages */ +#define IEEE80211_TX_MAX_RATES 4 + +/* maximum number of rate table entries */ +#define IEEE80211_TX_RATE_TABLE_SIZE 4 + +/** + * struct ieee80211_tx_rate - rate selection/status + * + * @idx: rate index to attempt to send with + * @flags: rate control flags (&enum mac80211_rate_control_flags) + * @count: number of tries in this rate before going to the next rate + * + * A value of -1 for @idx indicates an invalid rate and, if used + * in an array of retry rates, that no more rates should be tried. + * + * When used for transmit status reporting, the driver should + * always report the rate along with the flags it used. + * + * &struct ieee80211_tx_info contains an array of these structs + * in the control information, and it will be filled by the rate + * control algorithm according to what should be sent. For example, + * if this array contains, in the format { <idx>, <count> } the + * information + * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } + * then this means that the frame should be transmitted + * up to twice at rate 3, up to twice at rate 2, and up to four + * times at rate 1 if it doesn't get acknowledged. Say it gets + * acknowledged by the peer after the fifth attempt, the status + * information should then contain + * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... + * since it was transmitted twice at rate 3, twice at rate 2 + * and once at rate 1 after which we received an acknowledgement. + */ +struct ieee80211_tx_rate { + s8 idx; + u16 count:5, + flags:11; +} __packed; + +#define IEEE80211_MAX_TX_RETRY 31 + +static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate, + u8 mcs, u8 nss) +{ + WARN_ON(mcs & ~0xF); + WARN_ON((nss - 1) & ~0x7); + rate->idx = ((nss - 1) << 4) | mcs; +} + +static inline u8 +ieee80211_rate_get_vht_mcs(const struct ieee80211_tx_rate *rate) +{ + return rate->idx & 0xF; +} + +static inline u8 +ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate) +{ + return (rate->idx >> 4) + 1; +} + +/** + * struct ieee80211_tx_info - skb transmit information + * + * This structure is placed in skb->cb for three uses: + * (1) mac80211 TX control - mac80211 tells the driver what to do + * (2) driver internal use (if applicable) + * (3) TX status information - driver tells mac80211 what happened + * + * @flags: transmit info flags, defined above + * @band: the band to transmit on (use for checking for races) + * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC + * @ack_frame_id: internal frame ID for TX status, used internally + * @control: union for control data + * @status: union for status data + * @driver_data: array of driver_data pointers + * @ampdu_ack_len: number of acked aggregated frames. 
+ * relevant only if IEEE80211_TX_STAT_AMPDU was set. + * @ampdu_len: number of aggregated frames. + * relevant only if IEEE80211_TX_STAT_AMPDU was set. + * @ack_signal: signal strength of the ACK frame + */ +struct ieee80211_tx_info { + /* common information */ + u32 flags; + u8 band; + + u8 hw_queue; + + u16 ack_frame_id; + + union { + struct { + union { + /* rate control */ + struct { + struct ieee80211_tx_rate rates[ + IEEE80211_TX_MAX_RATES]; + s8 rts_cts_rate_idx; + u8 use_rts:1; + u8 use_cts_prot:1; + u8 short_preamble:1; + u8 skip_table:1; + /* 2 bytes free */ + }; + /* only needed before rate control */ + unsigned long jiffies; + }; + /* NB: vif can be NULL for injected frames */ + struct ieee80211_vif *vif; + struct ieee80211_key_conf *hw_key; + u32 flags; + /* 4 bytes free */ + } control; + struct { + struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; + s32 ack_signal; + u8 ampdu_ack_len; + u8 ampdu_len; + u8 antenna; + void *status_driver_data[21 / sizeof(void *)]; + } status; + struct { + struct ieee80211_tx_rate driver_rates[ + IEEE80211_TX_MAX_RATES]; + u8 pad[4]; + + void *rate_driver_data[ + IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE / sizeof(void *)]; + }; + void *driver_data[ + IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)]; + }; +}; + +/** + * struct ieee80211_sched_scan_ies - scheduled scan IEs + * + * This structure is used to pass the appropriate IEs to be used in scheduled + * scans for all bands. It contains both the IEs passed from the userspace + * and the ones generated by mac80211. + * + * @ie: array with the IEs for each supported band + * @len: array with the total length of the IEs for each band + */ +struct ieee80211_sched_scan_ies { + u8 *ie[IEEE80211_NUM_BANDS]; + size_t len[IEEE80211_NUM_BANDS]; +}; + +static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb) +{ + return (struct ieee80211_tx_info *)skb->cb; +} + +static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb) +{ + return (struct ieee80211_rx_status *)skb->cb; +} + +/** + * ieee80211_tx_info_clear_status - clear TX status + * + * @info: The &struct ieee80211_tx_info to be cleared. + * + * When the driver passes an skb back to mac80211, it must report + * a number of things in TX status. This function clears everything + * in the TX status but the rate control information (it does clear + * the count since you need to fill that in anyway). + * + * NOTE: You can only use this function if you do NOT use + * info->driver_data! Use info->rate_driver_data + * instead if you need only the less space that allows. + */ +static inline void +ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) +{ + int i; + + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != + offsetof(struct ieee80211_tx_info, control.rates)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != + offsetof(struct ieee80211_tx_info, driver_rates)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != 8); + /* clear the rate counts */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) + info->status.rates[i].count = 0; + + BUILD_BUG_ON( + offsetof(struct ieee80211_tx_info, status.ack_signal) != 20); + memset(&info->status.ampdu_ack_len, 0, + sizeof(struct ieee80211_tx_info) - + offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); +} + + /** * enum mac80211_rx_flags - receive flags * @@ -380,7 +821,6 @@ struct ieee80211_tx_control { * @RX_FLAG_MMIC_ERROR: Michael MIC error was reported on this frame. 
* Use together with %RX_FLAG_MMIC_STRIPPED. * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware. - * @RX_FLAG_RADIOTAP: This frame starts with a radiotap header. * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame, * verification has been done by the hardware. * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. @@ -390,18 +830,97 @@ struct ieee80211_tx_control { * the frame. * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on * the frame. - * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) - * is valid. + * @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime + * field) is valid and contains the time the first symbol of the MPDU + * was received. This is useful in monitor mode and for proper IBSS + * merging. + * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime + * field) is valid and contains the time the last symbol of the MPDU + * (including FCS) was received. + * @RX_FLAG_SHORTPRE: Short preamble was used for this frame + * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index + * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index + * @RX_FLAG_40MHZ: HT40 (40 MHz) was used + * @RX_FLAG_SHORT_GI: Short guard interval was used + * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present. + * Valid only for data frames (mainly A-MPDU) + * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if + * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT + * to hw.radiotap_mcs_details to advertise that fact + * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference + * number (@ampdu_reference) must be populated and be a distinct number for + * each A-MPDU + * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes + * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for + * monitoring purposes only + * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all + * subframes of a single A-MPDU + * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU + * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected + * on this subframe + * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC + * is stored in the @ampdu_delimiter_crc field) + * @RX_FLAG_LDPC: LDPC was used + * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 + * @RX_FLAG_10MHZ: 10 MHz (half channel) was used + * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used + * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU + * subframes instead of a one huge frame for performance reasons. + * All, but the last MSDU from an A-MSDU should have this flag set. E.g. + * if an A-MSDU has 3 frames, the first 2 must have the flag set, while + * the 3rd (last) one must not have this flag set. The flag is used to + * deal with retransmission/duplication recovery properly since A-MSDU + * subframes share the same sequence number. Reported subframes can be + * either regular MSDU or singly A-MSDUs. Subframes must not be + * interleaved with other frames. 
*/ enum mac80211_rx_flags { - RX_FLAG_MMIC_ERROR = 1<<0, - RX_FLAG_DECRYPTED = 1<<1, - RX_FLAG_RADIOTAP = 1<<2, - RX_FLAG_MMIC_STRIPPED = 1<<3, - RX_FLAG_IV_STRIPPED = 1<<4, - RX_FLAG_FAILED_FCS_CRC = 1<<5, - RX_FLAG_FAILED_PLCP_CRC = 1<<6, - RX_FLAG_TSFT = 1<<7, + RX_FLAG_MMIC_ERROR = BIT(0), + RX_FLAG_DECRYPTED = BIT(1), + RX_FLAG_MMIC_STRIPPED = BIT(3), + RX_FLAG_IV_STRIPPED = BIT(4), + RX_FLAG_FAILED_FCS_CRC = BIT(5), + RX_FLAG_FAILED_PLCP_CRC = BIT(6), + RX_FLAG_MACTIME_START = BIT(7), + RX_FLAG_SHORTPRE = BIT(8), + RX_FLAG_HT = BIT(9), + RX_FLAG_40MHZ = BIT(10), + RX_FLAG_SHORT_GI = BIT(11), + RX_FLAG_NO_SIGNAL_VAL = BIT(12), + RX_FLAG_HT_GF = BIT(13), + RX_FLAG_AMPDU_DETAILS = BIT(14), + RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15), + RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16), + RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), + RX_FLAG_AMPDU_IS_LAST = BIT(18), + RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), + RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20), + RX_FLAG_MACTIME_END = BIT(21), + RX_FLAG_VHT = BIT(22), + RX_FLAG_LDPC = BIT(23), + RX_FLAG_STBC_MASK = BIT(26) | BIT(27), + RX_FLAG_10MHZ = BIT(28), + RX_FLAG_5MHZ = BIT(29), + RX_FLAG_AMSDU_MORE = BIT(30), +}; + +#define RX_FLAG_STBC_SHIFT 26 + +/** + * enum mac80211_rx_vht_flags - receive VHT flags + * + * These flags are used with the @vht_flag member of + * &struct ieee80211_rx_status. + * @RX_VHT_FLAG_80MHZ: 80 MHz was used + * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used + * @RX_VHT_FLAG_160MHZ: 160 MHz was used + * @RX_VHT_FLAG_BF: packet was beamformed + */ +enum mac80211_rx_vht_flags { + RX_VHT_FLAG_80MHZ = BIT(0), + RX_VHT_FLAG_80P80MHZ = BIT(1), + RX_VHT_FLAG_160MHZ = BIT(2), + RX_VHT_FLAG_BF = BIT(3), }; /** @@ -409,90 +928,123 @@ enum mac80211_rx_flags { * * The low-level driver should provide this information (the subset * supported by hardware) to the 802.11 code with each received - * frame. - * @mactime: MAC timestamp as defined by 802.11 + * frame, in the skb's control buffer (cb). + * + * @mactime: value in microseconds of the 64-bit Time Synchronization Function + * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. + * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use + * it but can store it and pass it back to the driver for synchronisation + * @band: the active band when this frame was received * @freq: frequency the radio was tuned to when receiving this frame, in MHz - * @channel: channel the radio was tuned to - * @phymode: active PHY mode - * @ssi: signal strength when receiving this frame - * @signal: used as 'qual' in statistics reporting - * @noise: PHY noise when receiving this frame + * @signal: signal strength when receiving this frame, either in dBm, in dB or + * unspecified depending on the hardware capabilities flags + * @IEEE80211_HW_SIGNAL_* + * @chains: bitmask of receive chains for which separate signal strength + * values were filled. 
+ * @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't + * support dB or unspecified units) * @antenna: antenna used - * @rate: data rate + * @rate_idx: index of data rate into band's supported rates or MCS index if + * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) + * @vht_nss: number of streams (VHT only) * @flag: %RX_FLAG_* + * @vht_flag: %RX_VHT_FLAG_* + * @rx_flags: internal RX flags for mac80211 + * @ampdu_reference: A-MPDU reference number, must be a different value for + * each A-MPDU but the same for each subframe within one A-MPDU + * @ampdu_delimiter_crc: A-MPDU delimiter CRC */ struct ieee80211_rx_status { u64 mactime; - int freq; - int channel; - enum ieee80211_phymode phymode; - int ssi; - int signal; - int noise; - int antenna; - int rate; - int flag; + u32 device_timestamp; + u32 ampdu_reference; + u32 flag; + u16 freq; + u8 vht_flag; + u8 rate_idx; + u8 vht_nss; + u8 rx_flags; + u8 band; + u8 antenna; + s8 signal; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + u8 ampdu_delimiter_crc; }; /** - * enum ieee80211_tx_status_flags - transmit status flags - * - * Status flags to indicate various transmit conditions. + * enum ieee80211_conf_flags - configuration flags * - * @IEEE80211_TX_STATUS_TX_FILTERED: The frame was not transmitted - * because the destination STA was in powersave mode. + * Flags to define PHY configuration options * - * @IEEE80211_TX_STATUS_ACK: Frame was acknowledged + * @IEEE80211_CONF_MONITOR: there's a monitor interface present -- use this + * to determine for example whether to calculate timestamps for packets + * or not, do not use instead of filter flags! + * @IEEE80211_CONF_PS: Enable 802.11 power save mode (managed mode only). + * This is the power save mode defined by IEEE 802.11-2007 section 11.2, + * meaning that the hardware still wakes up for beacons, is able to + * transmit frames and receive the possible acknowledgment frames. + * Not to be confused with hardware specific wakeup/sleep states, + * driver is responsible for that. See the section "Powersave support" + * for more. + * @IEEE80211_CONF_IDLE: The device is running, but idle; if the flag is set + * the driver should be prepared to handle configuration requests but + * may turn the device off as much as possible. Typically, this flag will + * be set when an interface is set UP but not associated or scanning, but + * it can also be unset in that case when monitor interfaces are active. + * @IEEE80211_CONF_OFFCHANNEL: The device is currently not on its main + * operating channel. */ -enum ieee80211_tx_status_flags { - IEEE80211_TX_STATUS_TX_FILTERED = 1<<0, - IEEE80211_TX_STATUS_ACK = 1<<1, +enum ieee80211_conf_flags { + IEEE80211_CONF_MONITOR = (1<<0), + IEEE80211_CONF_PS = (1<<1), + IEEE80211_CONF_IDLE = (1<<2), + IEEE80211_CONF_OFFCHANNEL = (1<<3), }; + /** - * struct ieee80211_tx_status - transmit status - * - * As much information as possible should be provided for each transmitted - * frame with ieee80211_tx_status(). - * - * @control: a copy of the &struct ieee80211_tx_control passed to the driver - * in the tx() callback. - * - * @flags: transmit status flags, defined above - * - * @ack_signal: signal strength of the ACK frame - * - * @excessive_retries: set to 1 if the frame was retried many times - * but not acknowledged - * - * @retry_count: number of retries - * - * @queue_length: ?? REMOVE - * @queue_number: ?? 
REMOVE + * enum ieee80211_conf_changed - denotes which configuration changed + * + * @IEEE80211_CONF_CHANGE_LISTEN_INTERVAL: the listen interval changed + * @IEEE80211_CONF_CHANGE_MONITOR: the monitor flag changed + * @IEEE80211_CONF_CHANGE_PS: the PS flag or dynamic PS timeout changed + * @IEEE80211_CONF_CHANGE_POWER: the TX power changed + * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed + * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed + * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed + * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed + * Note that this is only valid if channel contexts are not used, + * otherwise each channel context has the number of chains listed. */ -struct ieee80211_tx_status { - struct ieee80211_tx_control control; - u8 flags; - bool excessive_retries; - u8 retry_count; - int ack_signal; - int queue_length; - int queue_number; +enum ieee80211_conf_changed { + IEEE80211_CONF_CHANGE_SMPS = BIT(1), + IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2), + IEEE80211_CONF_CHANGE_MONITOR = BIT(3), + IEEE80211_CONF_CHANGE_PS = BIT(4), + IEEE80211_CONF_CHANGE_POWER = BIT(5), + IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), + IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), + IEEE80211_CONF_CHANGE_IDLE = BIT(8), }; /** - * enum ieee80211_conf_flags - configuration flags + * enum ieee80211_smps_mode - spatial multiplexing power save mode * - * Flags to define PHY configuration options - * - * @IEEE80211_CONF_SHORT_SLOT_TIME: use 802.11g short slot time - * @IEEE80211_CONF_RADIOTAP: add radiotap header at receive time (if supported) - * @IEEE80211_CONF_SUPPORT_HT_MODE: use 802.11n HT capabilities (if supported) + * @IEEE80211_SMPS_AUTOMATIC: automatic + * @IEEE80211_SMPS_OFF: off + * @IEEE80211_SMPS_STATIC: static + * @IEEE80211_SMPS_DYNAMIC: dynamic + * @IEEE80211_SMPS_NUM_MODES: internal, don't use */ -enum ieee80211_conf_flags { - IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0), - IEEE80211_CONF_RADIOTAP = (1<<1), - IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2), +enum ieee80211_smps_mode { + IEEE80211_SMPS_AUTOMATIC, + IEEE80211_SMPS_OFF, + IEEE80211_SMPS_STATIC, + IEEE80211_SMPS_DYNAMIC, + + /* keep last */ + IEEE80211_SMPS_NUM_MODES, }; /** @@ -500,70 +1052,90 @@ enum ieee80211_conf_flags { * * This struct indicates how the driver shall configure the hardware. * - * @radio_enabled: when zero, driver is required to switch off the radio. - * TODO make a flag - * @channel: IEEE 802.11 channel number - * @freq: frequency in MHz - * @channel_val: hardware specific channel value for the channel - * @phymode: PHY mode to activate (REMOVE) - * @chan: channel to switch to, pointer to the channel information - * @mode: pointer to mode definition - * @regulatory_domain: ?? 
- * @beacon_int: beacon interval (TODO make interface config) * @flags: configuration flags defined above - * @power_level: transmit power limit for current regulatory domain in dBm - * @antenna_max: maximum antenna gain - * @antenna_sel_tx: transmit antenna selection, 0: default/diversity, - * 1/2: antenna 0/1 - * @antenna_sel_rx: receive antenna selection, like @antenna_sel_tx - * @ht_conf: describes current self configuration of 802.11n HT capabilies - * @ht_bss_conf: describes current BSS configuration of 802.11n HT parameters + * + * @listen_interval: listen interval in units of beacon interval + * @max_sleep_period: the maximum number of beacon intervals to sleep for + * before checking the beacon for a TIM bit (managed mode only); this + * value will be only achievable between DTIM frames, the hardware + * needs to check for the multicast traffic bit in DTIM beacons. + * This variable is valid only when the CONF_PS flag is set. + * @ps_dtim_period: The DTIM period of the AP we're connected to, for use + * in power saving. Power saving will not be enabled until a beacon + * has been received and the DTIM period is known. + * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the + * powersave documentation below. This variable is valid only when + * the CONF_PS flag is set. + * + * @power_level: requested transmit power (in dBm), backward compatibility + * value only that is set to the minimum of all interfaces + * + * @chandef: the channel definition to tune to + * @radar_enabled: whether radar detection is enabled + * + * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame + * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11, + * but actually means the number of transmissions not the number of retries + * @short_frame_max_tx_count: Maximum number of transmissions for a "short" + * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the + * number of transmissions not the number of retries + * + * @smps_mode: spatial multiplexing powersave mode; note that + * %IEEE80211_SMPS_STATIC is used when the device is not + * configured for an HT channel. + * Note that this is only valid if channel contexts are not used, + * otherwise each channel context has the number of chains listed. */ struct ieee80211_conf { - int channel; /* IEEE 802.11 channel number */ - int freq; /* MHz */ - int channel_val; /* hw specific value for the channel */ + u32 flags; + int power_level, dynamic_ps_timeout; + int max_sleep_period; - enum ieee80211_phymode phymode; - struct ieee80211_channel *chan; - struct ieee80211_hw_mode *mode; - unsigned int regulatory_domain; - int radio_enabled; + u16 listen_interval; + u8 ps_dtim_period; - int beacon_int; - u32 flags; - u8 power_level; - u8 antenna_max; - u8 antenna_sel_tx; - u8 antenna_sel_rx; + u8 long_frame_max_tx_count, short_frame_max_tx_count; + + struct cfg80211_chan_def chandef; + bool radar_enabled; + enum ieee80211_smps_mode smps_mode; +}; - struct ieee80211_ht_info ht_conf; - struct ieee80211_ht_bss_info ht_bss_conf; +/** + * struct ieee80211_channel_switch - holds the channel switch data + * + * The information provided in this structure is required for channel switch + * operation. + * + * @timestamp: value in microseconds of the 64-bit Time Synchronization + * Function (TSF) timer when the frame containing the channel switch + * announcement was received. This is simply the rx.mactime parameter + * the driver passed into mac80211. 
+ * @block_tx: Indicates whether transmission must be blocked before the + * scheduled channel switch, as indicated by the AP. + * @chandef: the new channel to switch to + * @count: the number of TBTT's until the channel switch event + */ +struct ieee80211_channel_switch { + u64 timestamp; + bool block_tx; + struct cfg80211_chan_def chandef; + u8 count; }; /** - * enum ieee80211_if_types - types of 802.11 network interfaces - * - * @IEEE80211_IF_TYPE_INVALID: invalid interface type, not used - * by mac80211 itself - * @IEEE80211_IF_TYPE_AP: interface in AP mode. - * @IEEE80211_IF_TYPE_MGMT: special interface for communication with hostap - * daemon. Drivers should never see this type. - * @IEEE80211_IF_TYPE_STA: interface in STA (client) mode. - * @IEEE80211_IF_TYPE_IBSS: interface in IBSS (ad-hoc) mode. - * @IEEE80211_IF_TYPE_MNTR: interface in monitor (rfmon) mode. - * @IEEE80211_IF_TYPE_WDS: interface in WDS mode. - * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers - * will never see this type. - */ -enum ieee80211_if_types { - IEEE80211_IF_TYPE_INVALID, - IEEE80211_IF_TYPE_AP, - IEEE80211_IF_TYPE_STA, - IEEE80211_IF_TYPE_IBSS, - IEEE80211_IF_TYPE_MNTR, - IEEE80211_IF_TYPE_WDS, - IEEE80211_IF_TYPE_VLAN, + * enum ieee80211_vif_flags - virtual interface flags + * + * @IEEE80211_VIF_BEACON_FILTER: the device performs beacon filtering + * on this virtual interface to avoid unnecessary CPU wakeups + * @IEEE80211_VIF_SUPPORTS_CQM_RSSI: the device can do connection quality + * monitoring on this virtual interface -- i.e. it can monitor + * connection quality related parameters, such as the RSSI level and + * provide notifications if configured trigger levels are reached. + */ +enum ieee80211_vif_flags { + IEEE80211_VIF_BEACON_FILTER = BIT(0), + IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), }; /** @@ -573,85 +1145,73 @@ enum ieee80211_if_types { * use during the life of a virtual interface. * * @type: type of this virtual interface + * @bss_conf: BSS configuration for this interface, either our own + * or the BSS we're associated to + * @addr: address of this interface + * @p2p: indicates whether this AP or STA interface is a p2p + * interface, i.e. a GO or p2p-sta respectively + * @csa_active: marks whether a channel switch is going on. Internally it is + * write-protected by sdata_lock and local->mtx so holding either is fine + * for read access. + * @driver_flags: flags/capabilities the driver has for this interface, + * these need to be set (or cleared) when the interface is added + * or, if supported by the driver, the interface type is changed + * at runtime, mac80211 will never touch this field + * @hw_queue: hardware queue for each AC + * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only + * @chanctx_conf: The channel context this interface is assigned to, or %NULL + * when it is not assigned. This pointer is RCU-protected due to the TX + * path needing to access it; even though the netdev carrier will always + * be off when it is %NULL there can still be races and packets could be + * processed after it switches back to %NULL. + * @debugfs_dir: debugfs dentry, can be used by drivers to create own per + * interface debug files. Note that it will be NULL for the virtual + * monitor interface (if that is requested.) * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *). 
*/ struct ieee80211_vif { - enum ieee80211_if_types type; + enum nl80211_iftype type; + struct ieee80211_bss_conf bss_conf; + u8 addr[ETH_ALEN]; + bool p2p; + bool csa_active; + + u8 cab_queue; + u8 hw_queue[IEEE80211_NUM_ACS]; + + struct ieee80211_chanctx_conf __rcu *chanctx_conf; + + u32 driver_flags; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfs_dir; +#endif + /* must be last */ - u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); + u8 drv_priv[0] __aligned(sizeof(void *)); }; +static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) +{ +#ifdef CONFIG_MAC80211_MESH + return vif->type == NL80211_IFTYPE_MESH_POINT; +#endif + return false; +} + /** - * struct ieee80211_if_init_conf - initial configuration of an interface - * - * @vif: pointer to a driver-use per-interface structure. The pointer - * itself is also used for various functions including - * ieee80211_beacon_get() and ieee80211_get_buffered_bc(). - * @type: one of &enum ieee80211_if_types constants. Determines the type of - * added/removed interface. - * @mac_addr: pointer to MAC address of the interface. This pointer is valid - * until the interface is removed (i.e. it cannot be used after - * remove_interface() callback was called for this interface). + * wdev_to_ieee80211_vif - return a vif struct from a wdev + * @wdev: the wdev to get the vif for * - * This structure is used in add_interface() and remove_interface() - * callbacks of &struct ieee80211_hw. + * This can be used by mac80211 drivers with direct cfg80211 APIs + * (like the vendor commands) that get a wdev. * - * When you allow multiple interfaces to be added to your PHY, take care - * that the hardware can actually handle multiple MAC addresses. However, - * also take care that when there's no interface left with mac_addr != %NULL - * you remove the MAC address from the device to avoid acknowledging packets - * in pure monitor mode. - */ -struct ieee80211_if_init_conf { - enum ieee80211_if_types type; - struct ieee80211_vif *vif; - void *mac_addr; -}; - -/** - * struct ieee80211_if_conf - configuration of an interface - * - * @type: type of the interface. This is always the same as was specified in - * &struct ieee80211_if_init_conf. The type of an interface never changes - * during the life of the interface; this field is present only for - * convenience. - * @bssid: BSSID of the network we are associated to/creating. - * @ssid: used (together with @ssid_len) by drivers for hardware that - * generate beacons independently. The pointer is valid only during the - * config_interface() call, so copy the value somewhere if you need - * it. - * @ssid_len: length of the @ssid field. - * @beacon: beacon template. Valid only if @host_gen_beacon_template in - * &struct ieee80211_hw is set. The driver is responsible of freeing - * the sk_buff. - * @beacon_control: tx_control for the beacon template, this field is only - * valid when the @beacon field was set. - * - * This structure is passed to the config_interface() callback of - * &struct ieee80211_hw. - */ -struct ieee80211_if_conf { - int type; - u8 *bssid; - u8 *ssid; - size_t ssid_len; - struct sk_buff *beacon; - struct ieee80211_tx_control *beacon_control; -}; - -/** - * enum ieee80211_key_alg - key algorithm - * @ALG_WEP: WEP40 or WEP104 - * @ALG_TKIP: TKIP - * @ALG_CCMP: CCMP (AES) + * Note that this function may return %NULL if the given wdev isn't + * associated with a vif that the driver knows about (e.g. monitor + * or AP_VLAN interfaces.) 
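A minimal illustrative sketch (an editorial addition, not part of mac80211.h) of how a driver might combine the per-interface @drv_priv area with wdev_to_ieee80211_vif() in a cfg80211 vendor-command handler. "struct foo_vif", "foo_vendor_doit" and the counter are hypothetical names; the driver is assumed to have set hw->vif_data_size = sizeof(struct foo_vif) at registration.

#include <net/mac80211.h>

struct foo_vif {			/* hypothetical per-interface driver state */
	u32 tx_frames;
};

static int foo_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			   const void *data, int data_len)
{
	struct ieee80211_vif *vif = wdev_to_ieee80211_vif(wdev);
	struct foo_vif *fvif;

	if (!vif)	/* e.g. a monitor or AP_VLAN wdev the driver never saw */
		return -ENODEV;

	fvif = (struct foo_vif *)vif->drv_priv;
	fvif->tx_frames = 0;	/* example only: reset a hypothetical counter */
	return 0;
}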
*/ -enum ieee80211_key_alg { - ALG_WEP, - ALG_TKIP, - ALG_CCMP, -}; - +struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); /** * enum ieee80211_key_flags - key flags @@ -659,19 +1219,40 @@ enum ieee80211_key_alg { * These flags are used for communication about keys between the driver * and mac80211, with the @flags parameter of &struct ieee80211_key_conf. * - * @IEEE80211_KEY_FLAG_WMM_STA: Set by mac80211, this flag indicates - * that the STA this key will be used with could be using QoS. * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the * driver to indicate that it requires IV generation for this * particular key. * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by * the driver for a TKIP key if it requires Michael MIC * generation in software. + * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates + * that the key is pairwise rather then a shared key. + * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a + * CCMP key if it requires CCMP encryption of management frames (MFP) to + * be done in software. + * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver + * if space should be prepared for the IV, but the IV + * itself should not be generated. Do not set together with + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. + * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received + * management frames. The flag can help drivers that have a hardware + * crypto implementation that doesn't deal with management frames + * properly by allowing them to not upload the keys to hardware and + * fall back to software crypto. Note that this flag deals only with + * RX, if your crypto engine can't deal with TX you can also set the + * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW. + * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the + * driver for a CCMP key to indicate that is requires IV generation + * only for managment frames (MFP). */ enum ieee80211_key_flags { - IEEE80211_KEY_FLAG_WMM_STA = 1<<0, - IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, - IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, + IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0), + IEEE80211_KEY_FLAG_GENERATE_IV = BIT(1), + IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(2), + IEEE80211_KEY_FLAG_PAIRWISE = BIT(3), + IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(4), + IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5), + IEEE80211_KEY_FLAG_RX_MGMT = BIT(6), }; /** @@ -683,14 +1264,22 @@ enum ieee80211_key_flags { * @hw_key_idx: To be set by the driver, this is the key index the driver * wants to be given when a frame is transmitted and needs to be * encrypted in hardware. - * @alg: The key algorithm. + * @cipher: The key's cipher suite selector. * @flags: key flags, see &enum ieee80211_key_flags. * @keyidx: the key index (0-3) * @keylen: key material length - * @key: key material + * @key: key material. 
For ALG_TKIP the key is encoded as a 256-bit (32 byte) + * data block: + * - Temporal Encryption Key (128 bits) + * - Temporal Authenticator Tx MIC Key (64 bits) + * - Temporal Authenticator Rx MIC Key (64 bits) + * @icv_len: The ICV length for this key type + * @iv_len: The IV length for this key type */ struct ieee80211_key_conf { - enum ieee80211_key_alg alg; + u32 cipher; + u8 icv_len; + u8 iv_len; u8 hw_key_idx; u8 flags; s8 keyidx; @@ -699,6 +1288,36 @@ struct ieee80211_key_conf { }; /** + * struct ieee80211_cipher_scheme - cipher scheme + * + * This structure contains a cipher scheme information defining + * the secure packet crypto handling. + * + * @cipher: a cipher suite selector + * @iftype: a cipher iftype bit mask indicating an allowed cipher usage + * @hdr_len: a length of a security header used the cipher + * @pn_len: a length of a packet number in the security header + * @pn_off: an offset of pn from the beginning of the security header + * @key_idx_off: an offset of key index byte in the security header + * @key_idx_mask: a bit mask of key_idx bits + * @key_idx_shift: a bit shift needed to get key_idx + * key_idx value calculation: + * (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift + * @mic_len: a mic length in bytes + */ +struct ieee80211_cipher_scheme { + u32 cipher; + u16 iftype; + u8 hdr_len; + u8 pn_len; + u8 pn_off; + u8 key_idx_off; + u8 key_idx_mask; + u8 key_idx_shift; + u8 mic_len; +}; + +/** * enum set_key_cmd - key command * * Used with the set_key() callback in &struct ieee80211_ops, this @@ -712,16 +1331,130 @@ enum set_key_cmd { }; /** + * enum ieee80211_sta_state - station state + * + * @IEEE80211_STA_NOTEXIST: station doesn't exist at all, + * this is a special state for add/remove transitions + * @IEEE80211_STA_NONE: station exists without special state + * @IEEE80211_STA_AUTH: station is authenticated + * @IEEE80211_STA_ASSOC: station is associated + * @IEEE80211_STA_AUTHORIZED: station is authorized (802.1X) + */ +enum ieee80211_sta_state { + /* NOTE: These need to be ordered correctly! */ + IEEE80211_STA_NOTEXIST, + IEEE80211_STA_NONE, + IEEE80211_STA_AUTH, + IEEE80211_STA_ASSOC, + IEEE80211_STA_AUTHORIZED, +}; + +/** + * enum ieee80211_sta_rx_bandwidth - station RX bandwidth + * @IEEE80211_STA_RX_BW_20: station can only receive 20 MHz + * @IEEE80211_STA_RX_BW_40: station can receive up to 40 MHz + * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz + * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz + * (including 80+80 MHz) + * + * Implementation note: 20 must be zero to be initialized + * correctly, the values must be sorted. + */ +enum ieee80211_sta_rx_bandwidth { + IEEE80211_STA_RX_BW_20 = 0, + IEEE80211_STA_RX_BW_40, + IEEE80211_STA_RX_BW_80, + IEEE80211_STA_RX_BW_160, +}; + +/** + * struct ieee80211_sta_rates - station rate selection table + * + * @rcu_head: RCU head used for freeing the table on update + * @rate: transmit rates/flags to be used by default. + * Overriding entries per-packet is possible by using cb tx control. + */ +struct ieee80211_sta_rates { + struct rcu_head rcu_head; + struct { + s8 idx; + u8 count; + u8 count_cts; + u8 count_rts; + u16 flags; + } rate[IEEE80211_TX_RATE_TABLE_SIZE]; +}; + +/** + * struct ieee80211_sta - station table entry + * + * A station table entry represents a station we are possibly + * communicating with. 
Since stations are RCU-managed in + * mac80211, any ieee80211_sta pointer you get access to must + * either be protected by rcu_read_lock() explicitly or implicitly, + * or you must take good care to not use such a pointer after a + * call to your sta_remove callback that removed it. + * + * @addr: MAC address + * @aid: AID we assigned to the station if we're an AP + * @supp_rates: Bitmap of supported rates (per band) + * @ht_cap: HT capabilities of this STA; restricted to our own capabilities + * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities + * @wme: indicates whether the STA supports WME. Only valid during AP-mode. + * @drv_priv: data area for driver use, will always be aligned to + * sizeof(void *), size is determined in hw information. + * @uapsd_queues: bitmap of queues configured for uapsd. Only valid + * if wme is supported. + * @max_sp: max Service Period. Only valid if wme is supported. + * @bandwidth: current bandwidth the station can receive with + * @rx_nss: in HT/VHT, the maximum number of spatial streams the + * station can receive at the moment, changed by operating mode + * notifications and capabilities. The value is only valid after + * the station moves to associated state. + * @smps_mode: current SMPS mode (off, static or dynamic) + * @rates: rate control selection table + * @tdls: indicates whether the STA is a TDLS peer + */ +struct ieee80211_sta { + u32 supp_rates[IEEE80211_NUM_BANDS]; + u8 addr[ETH_ALEN]; + u16 aid; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + bool wme; + u8 uapsd_queues; + u8 max_sp; + u8 rx_nss; + enum ieee80211_sta_rx_bandwidth bandwidth; + enum ieee80211_smps_mode smps_mode; + struct ieee80211_sta_rates __rcu *rates; + bool tdls; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); +}; + +/** * enum sta_notify_cmd - sta notify command * * Used with the sta_notify() callback in &struct ieee80211_ops, this - * indicates addition and removal of a station to station table + * indicates if an associated station made a power state transition. * - * @STA_NOTIFY_ADD: a station was added to the station table - * @STA_NOTIFY_REMOVE: a station being removed from the station table + * @STA_NOTIFY_SLEEP: a station is now sleeping + * @STA_NOTIFY_AWAKE: a sleeping station woke up */ enum sta_notify_cmd { - STA_NOTIFY_ADD, STA_NOTIFY_REMOVE + STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE, +}; + +/** + * struct ieee80211_tx_control - TX control data + * + * @sta: station table entry, this sta pointer may be NULL and + * it is not allowed to copy the pointer, due to RCU. + */ +struct ieee80211_tx_control { + struct ieee80211_sta *sta; }; /** @@ -733,14 +1466,18 @@ enum sta_notify_cmd { * any particular flags. There are some exceptions to this rule, * however, so you are advised to review these flags carefully. * - * @IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE: - * The device only needs to be supplied with a beacon template. - * If you need the host to generate each beacon then don't use - * this flag and call ieee80211_beacon_get() when you need the - * next beacon frame. Note that if you set this flag, you must - * implement the set_tim() callback for powersave mode to work - * properly. - * This flag is only relevant for access-point mode. + * @IEEE80211_HW_HAS_RATE_CONTROL: + * The hardware or firmware includes rate control, and cannot be + * controlled by the stack. 
As such, no rate control algorithm + * should be instantiated, and the TX rate reported to userspace + * will be taken from the TX status instead of the rate control + * algorithm. + * Note that this requires that the driver implement a number of + * callbacks so it has the correct information, it needs to have + * the @set_rts_threshold callback and must look at the BSS config + * @use_cts_prot for G/N protection, @use_short_slot for slot + * timing in 2.4 GHz and @use_short_preamble for preambles for + * CCK frames. * * @IEEE80211_HW_RX_INCLUDES_FCS: * Indicates that received frames passed to the stack include @@ -752,20 +1489,155 @@ enum sta_notify_cmd { * rely on the host system for such buffering. This option is used * to configure the IEEE 802.11 upper layer to buffer broadcast and * multicast frames when there are power saving stations so that - * the driver can fetch them with ieee80211_get_buffered_bc(). Note - * that not setting this flag works properly only when the - * %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because - * otherwise the stack will not know when the DTIM beacon was sent. - * - * @IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED: - * Channels are already configured to the default regulatory domain - * specified in the device's EEPROM + * the driver can fetch them with ieee80211_get_buffered_bc(). + * + * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE: + * Hardware is not capable of short slot operation on the 2.4 GHz band. + * + * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE: + * Hardware is not capable of receiving frames with short preamble on + * the 2.4 GHz band. + * + * @IEEE80211_HW_SIGNAL_UNSPEC: + * Hardware can provide signal values but we don't know its units. We + * expect values between 0 and @max_signal. + * If possible please provide dB or dBm instead. + * + * @IEEE80211_HW_SIGNAL_DBM: + * Hardware gives signal values in dBm, decibel difference from + * one milliwatt. This is the preferred method since it is standardized + * between different devices. @max_signal does not need to be set. + * + * @IEEE80211_HW_SPECTRUM_MGMT: + * Hardware supports spectrum management defined in 802.11h + * Measurement, Channel Switch, Quieting, TPC + * + * @IEEE80211_HW_AMPDU_AGGREGATION: + * Hardware supports 11n A-MPDU aggregation. + * + * @IEEE80211_HW_SUPPORTS_PS: + * Hardware has power save support (i.e. can go to sleep). + * + * @IEEE80211_HW_PS_NULLFUNC_STACK: + * Hardware requires nullfunc frame handling in stack, implies + * stack support for dynamic PS. + * + * @IEEE80211_HW_SUPPORTS_DYNAMIC_PS: + * Hardware has support for dynamic PS. + * + * @IEEE80211_HW_MFP_CAPABLE: + * Hardware supports management frame protection (MFP, IEEE 802.11w). + * + * @IEEE80211_HW_SUPPORTS_STATIC_SMPS: + * Hardware supports static spatial multiplexing powersave, + * ie. can turn off all but one chain even on HT connections + * that should be using more chains. + * + * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS: + * Hardware supports dynamic spatial multiplexing powersave, + * ie. can turn off all but one chain and then wake the rest + * up as required after, for example, rts/cts handshake. + * + * @IEEE80211_HW_SUPPORTS_UAPSD: + * Hardware supports Unscheduled Automatic Power Save Delivery + * (U-APSD) in managed mode. The mode is configured with + * conf_tx() operation. + * + * @IEEE80211_HW_REPORTS_TX_ACK_STATUS: + * Hardware can provide ack status reports of Tx frames to + * the stack. 
+ * + * @IEEE80211_HW_CONNECTION_MONITOR: + * The hardware performs its own connection monitoring, including + * periodic keep-alives to the AP and probing the AP on beacon loss. + * + * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC: + * This device needs to get data from beacon before association (i.e. + * dtim_period). + * + * @IEEE80211_HW_SUPPORTS_PER_STA_GTK: The device's crypto engine supports + * per-station GTKs as used by IBSS RSN or during fast transition. If + * the device doesn't support per-station GTKs, but can be asked not + * to decrypt group addressed frames, then IBSS RSN support is still + * possible but software crypto will be used. Advertise the wiphy flag + * only in that case. + * + * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device + * autonomously manages the PS status of connected stations. When + * this flag is set mac80211 will not trigger PS mode for connected + * stations based on the PM bit of incoming frames. + * Use ieee80211_start_ps()/ieee8021_end_ps() to manually configure + * the PS mode of connected stations. + * + * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session + * setup strictly in HW. mac80211 should not attempt to do this in + * software. + * + * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of + * a virtual monitor interface when monitor interfaces are the only + * active interfaces. + * + * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface + * queue mapping in order to use different queues (not just one per AC) + * for different virtual interfaces. See the doc section on HW queue + * control for more details. + * + * @IEEE80211_HW_SUPPORTS_RC_TABLE: The driver supports using a rate + * selection table provided by the rate control algorithm. + * + * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any + * P2P Interface. This will be honoured even if more than one interface + * is supported. + * + * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames + * only, to allow getting TBTT of a DTIM beacon. + * + * @IEEE80211_HW_SUPPORTS_HT_CCK_RATES: Hardware supports mixing HT/CCK rates + * and can cope with CCK rates in an aggregation session (e.g. by not + * using aggregation for such frames.) + * + * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h based channel-switch (CSA) + * for a single active channel while using channel contexts. When support + * is not enabled the default action is to disconnect when getting the + * CSA frame. + * + * @IEEE80211_HW_CHANGE_RUNNING_CHANCTX: The hardware can change a + * channel context on-the-fly. This is needed for channel switch + * on single-channel hardware. It can also be used as an + * optimization in certain channel switch cases with + * multi-channel. 
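A minimal illustrative sketch (an editorial addition, not part of mac80211.h) of how a driver might advertise a subset of these hardware flags when allocating its &struct ieee80211_hw; "struct foo_priv" and "foo_ops" are hypothetical, and which flags are appropriate depends entirely on the device.

#include <net/mac80211.h>

struct foo_priv { int dummy; };			/* hypothetical driver state */
static const struct ieee80211_ops foo_ops = {	/* callbacks omitted in this sketch */
};

static struct ieee80211_hw *foo_alloc_hw(void)
{
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return NULL;

	/* signal reported in dBm, A-MPDU aggregation supported, the device
	 * can sleep under mac80211 control and reports per-frame ACK status */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_SUPPORTS_PS |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	return hw;
}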
*/ enum ieee80211_hw_flags { - IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0, + IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, - IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED = 1<<3, + IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3, + IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, + IEEE80211_HW_SIGNAL_UNSPEC = 1<<5, + IEEE80211_HW_SIGNAL_DBM = 1<<6, + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 1<<7, + IEEE80211_HW_SPECTRUM_MGMT = 1<<8, + IEEE80211_HW_AMPDU_AGGREGATION = 1<<9, + IEEE80211_HW_SUPPORTS_PS = 1<<10, + IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, + IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, + IEEE80211_HW_MFP_CAPABLE = 1<<13, + IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, + IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, + IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, + IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, + IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, + IEEE80211_HW_CONNECTION_MONITOR = 1<<19, + IEEE80211_HW_QUEUE_CONTROL = 1<<20, + IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21, + IEEE80211_HW_AP_LINK_PS = 1<<22, + IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, + IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, + IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, + IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, + IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, + IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, + IEEE80211_HW_CHANGE_RUNNING_CHANCTX = 1<<29, }; /** @@ -777,14 +1649,11 @@ enum ieee80211_hw_flags { * @wiphy: This points to the &struct wiphy allocated for this * 802.11 PHY. You must fill in the @perm_addr and @dev * members of this structure using SET_IEEE80211_DEV() - * and SET_IEEE80211_PERM_ADDR(). + * and SET_IEEE80211_PERM_ADDR(). Additionally, all supported + * bands (with channels, bitrates) are registered here. * * @conf: &struct ieee80211_conf, device configuration, don't use. * - * @workqueue: single threaded workqueue available for driver use, - * allocated by mac80211 on registration and flushed on - * unregistration. - * * @priv: pointer to private area that was allocated for driver use * along with this structure. * @@ -793,17 +1662,18 @@ enum ieee80211_hw_flags { * @extra_tx_headroom: headroom to reserve in each transmit skb * for use by the driver (e.g. for transmit headers.) * - * @channel_change_time: time (in microseconds) it takes to change channels. - * - * @max_rssi: Maximum value for ssi in RX information, use - * negative numbers for dBm and 0 to indicate no support. + * @extra_beacon_tailroom: tailroom to reserve in each beacon tx skb. + * Can be used by drivers to add extra IEs. * - * @max_signal: like @max_rssi, but for the signal value. + * @max_signal: Maximum value for signal (rssi) in RX information, used + * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB * - * @max_noise: like @max_rssi, but for the noise value. + * @max_listen_interval: max listen interval in units of beacon interval + * that HW supports * * @queues: number of available hardware transmit queues for - * data packets. WMM/QoS requires at least four. + * data packets. WMM/QoS requires at least four, these + * queues need to have configurable access parameters. * * @rate_control_algorithm: rate control algorithm for this hardware. * If unset (NULL), the default algorithm will be used. Must be @@ -811,24 +1681,104 @@ enum ieee80211_hw_flags { * * @vif_data_size: size (in bytes) of the drv_priv data area * within &struct ieee80211_vif. + * @sta_data_size: size (in bytes) of the drv_priv data area + * within &struct ieee80211_sta. 
+ * @chanctx_data_size: size (in bytes) of the drv_priv data area + * within &struct ieee80211_chanctx_conf. + * + * @max_rates: maximum number of alternate rate retry stages the hw + * can handle. + * @max_report_rates: maximum number of alternate rate retry stages + * the hw can report back. + * @max_rate_tries: maximum number of tries for each stage + * + * @max_rx_aggregation_subframes: maximum buffer size (number of + * sub-frames) to be used for A-MPDU block ack receiver + * aggregation. + * This is only relevant if the device has restrictions on the + * number of subframes, if it relies on mac80211 to do reordering + * it shouldn't be set. + * + * @max_tx_aggregation_subframes: maximum number of subframes in an + * aggregate an HT driver will transmit, used by the peer as a + * hint to size its reorder buffer. + * + * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX + * (if %IEEE80211_HW_QUEUE_CONTROL is set) + * + * @radiotap_mcs_details: lists which MCS information can the HW + * reports, by default it is set to _MCS, _GI and _BW but doesn't + * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only + * adding _BW is supported today. + * + * @radiotap_vht_details: lists which VHT MCS information the HW reports, + * the default is _GI | _BANDWIDTH. + * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. + * + * @netdev_features: netdev features to be set in each netdev created + * from this HW. Note only HW checksum features are currently + * compatible with mac80211. Other feature bits will be rejected. + * + * @uapsd_queues: This bitmap is included in (re)association frame to indicate + * for each access category if it is uAPSD trigger-enabled and delivery- + * enabled. Use IEEE80211_WMM_IE_STA_QOSINFO_AC_* to set this bitmap. + * Each bit corresponds to different AC. Value '1' in specific bit means + * that corresponding AC is both trigger- and delivery-enabled. '0' means + * neither enabled. + * + * @uapsd_max_sp_len: maximum number of total buffered frames the WMM AP may + * deliver to a WMM STA during any Service Period triggered by the WMM STA. + * Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values. + * + * @n_cipher_schemes: a size of an array of cipher schemes definitions. + * @cipher_schemes: a pointer to an array of cipher scheme definitions + * supported by HW. */ struct ieee80211_hw { struct ieee80211_conf conf; struct wiphy *wiphy; - struct workqueue_struct *workqueue; const char *rate_control_algorithm; void *priv; u32 flags; unsigned int extra_tx_headroom; - int channel_change_time; + unsigned int extra_beacon_tailroom; int vif_data_size; - u8 queues; - s8 max_rssi; + int sta_data_size; + int chanctx_data_size; + u16 queues; + u16 max_listen_interval; s8 max_signal; - s8 max_noise; + u8 max_rates; + u8 max_report_rates; + u8 max_rate_tries; + u8 max_rx_aggregation_subframes; + u8 max_tx_aggregation_subframes; + u8 offchannel_tx_hw_queue; + u8 radiotap_mcs_details; + u16 radiotap_vht_details; + netdev_features_t netdev_features; + u8 uapsd_queues; + u8 uapsd_max_sp_len; + u8 n_cipher_schemes; + const struct ieee80211_cipher_scheme *cipher_schemes; }; /** + * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy + * + * @wiphy: the &struct wiphy which we want to query + * + * mac80211 drivers can use this to get to their respective + * &struct ieee80211_hw. Drivers wishing to get to their own private + * structure can then access it via hw->priv. 
Note that mac802111 drivers should + * not use wiphy_priv() to try to get their private driver structure as this + * is already used internally by mac80211. + * + * Return: The mac80211 driver hw struct of @wiphy. + */ +struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy); + +/** * SET_IEEE80211_DEV - set device for 802.11 hardware * * @hw: the &struct ieee80211_hw to set the device for @@ -840,7 +1790,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev } /** - * SET_IEEE80211_PERM_ADDR - set the permanenet MAC address for 802.11 hardware + * SET_IEEE80211_PERM_ADDR - set the permanent MAC address for 802.11 hardware * * @hw: the &struct ieee80211_hw to set the MAC address for * @addr: the address to set @@ -850,6 +1800,43 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); } +static inline struct ieee80211_rate * +ieee80211_get_tx_rate(const struct ieee80211_hw *hw, + const struct ieee80211_tx_info *c) +{ + if (WARN_ON_ONCE(c->control.rates[0].idx < 0)) + return NULL; + return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx]; +} + +static inline struct ieee80211_rate * +ieee80211_get_rts_cts_rate(const struct ieee80211_hw *hw, + const struct ieee80211_tx_info *c) +{ + if (c->control.rts_cts_rate_idx < 0) + return NULL; + return &hw->wiphy->bands[c->band]->bitrates[c->control.rts_cts_rate_idx]; +} + +static inline struct ieee80211_rate * +ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, + const struct ieee80211_tx_info *c, int idx) +{ + if (c->control.rates[idx + 1].idx < 0) + return NULL; + return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[idx + 1].idx]; +} + +/** + * ieee80211_free_txskb - free TX skb + * @hw: the hardware + * @skb: the skb + * + * Free a transmit skb. Use this funtion when some failure + * to transmit happened and thus status cannot be reported. + */ +void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); + /** * DOC: Hardware crypto acceleration * @@ -858,16 +1845,12 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) * * The set_key() callback in the &struct ieee80211_ops for a given * device is called to enable hardware acceleration of encryption and - * decryption. The callback takes an @address parameter that will be - * the broadcast address for default keys, the other station's hardware - * address for individual keys or the zero address for keys that will - * be used only for transmission. + * decryption. The callback takes a @sta parameter that will be NULL + * for default keys or keys used for transmission only, or point to + * the station information for the peer for individual keys. * Multiple transmission keys with the same key index may be used when * VLANs are configured for an access point. * - * The @local_address parameter will always be set to our own address, - * this is only relevant if you support multiple local addresses. - * * When transmitting, the TX control data will use the @hw_key_idx * selected by the driver by modifying the &struct ieee80211_key_conf * pointed to by the @key parameter to the set_key() function. @@ -888,6 +1871,166 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) * parameter is guaranteed to be valid until another call to set_key() * removes it, but it can only be used as a cookie to differentiate * keys. 
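A hedged sketch (editorial addition) of one plausible set_key() implementation that offloads keys to hardware and records the chosen slot in @hw_key_idx. foo_hw_install_key() and foo_hw_remove_key() are hypothetical helpers; returning an error for SET_KEY makes mac80211 fall back to software crypto for that key.

#include <net/mac80211.h>

struct foo_priv;	/* hypothetical driver state, hw->priv */
static int foo_hw_install_key(struct foo_priv *priv, struct ieee80211_sta *sta,
			      struct ieee80211_key_conf *key);
static void foo_hw_remove_key(struct foo_priv *priv, u8 hw_key_idx);

static int foo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct foo_priv *priv = hw->priv;
	int slot;

	switch (cmd) {
	case SET_KEY:
		slot = foo_hw_install_key(priv, sta, key);	/* hypothetical */
		if (slot < 0)
			return -EOPNOTSUPP;	/* use software crypto instead */
		key->hw_key_idx = slot;		/* cookie handed back at TX time */
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; /* stack generates IVs */
		return 0;
	case DISABLE_KEY:
		foo_hw_remove_key(priv, key->hw_key_idx);	/* hypothetical */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}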
+ * + * In TKIP some HW need to be provided a phase 1 key, for RX decryption + * acceleration (i.e. iwlwifi). Those drivers should provide update_tkip_key + * handler. + * The update_tkip_key() call updates the driver with the new phase 1 key. + * This happens every time the iv16 wraps around (every 65536 packets). The + * set_key() call will happen only once for each key (unless the AP did + * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is + * provided by update_tkip_key only. The trigger that makes mac80211 call this + * handler is software decryption with wrap around of iv16. + * + * The set_default_unicast_key() call updates the default WEP key index + * configured to the hardware for WEP encryption type. This is required + * for devices that support offload of data packets (e.g. ARP responses). + */ + +/** + * DOC: Powersave support + * + * mac80211 has support for various powersave implementations. + * + * First, it can support hardware that handles all powersaving by itself, + * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware + * flag. In that case, it will be told about the desired powersave mode + * with the %IEEE80211_CONF_PS flag depending on the association status. + * The hardware must take care of sending nullfunc frames when necessary, + * i.e. when entering and leaving powersave mode. The hardware is required + * to look at the AID in beacons and signal to the AP that it woke up when + * it finds traffic directed to it. + * + * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in + * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused + * with hardware wakeup and sleep states. Driver is responsible for waking + * up the hardware before issuing commands to the hardware and putting it + * back to sleep at appropriate times. + * + * When PS is enabled, hardware needs to wakeup for beacons and receive the + * buffered multicast/broadcast frames after the beacon. Also it must be + * possible to send frames and receive the acknowledment frame. + * + * Other hardware designs cannot send nullfunc frames by themselves and also + * need software support for parsing the TIM bitmap. This is also supported + * by mac80211 by combining the %IEEE80211_HW_SUPPORTS_PS and + * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still + * required to pass up beacons. The hardware is still required to handle + * waking up for multicast traffic; if it cannot the driver must handle that + * as best as it can, mac80211 is too slow to do that. + * + * Dynamic powersave is an extension to normal powersave in which the + * hardware stays awake for a user-specified period of time after sending a + * frame so that reply frames need not be buffered and therefore delayed to + * the next wakeup. It's compromise of getting good enough latency when + * there's data traffic and still saving significantly power in idle + * periods. + * + * Dynamic powersave is simply supported by mac80211 enabling and disabling + * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS + * flag and mac80211 will handle everything automatically. Additionally, + * hardware having support for the dynamic PS feature may set the + * %IEEE80211_HW_SUPPORTS_DYNAMIC_PS flag to indicate that it can support + * dynamic PS mode itself. The driver needs to look at the + * @dynamic_ps_timeout hardware configuration value and use it that value + * whenever %IEEE80211_CONF_PS is set. 
In this case mac80211 will disable + * dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS + * enabled whenever user has enabled powersave. + * + * Driver informs U-APSD client support by enabling + * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the + * uapsd parameter in conf_tx() operation. Hardware needs to send the QoS + * Nullfunc frames and stay awake until the service period has ended. To + * utilize U-APSD, dynamic powersave is disabled for voip AC and all frames + * from that AC are transmitted with powersave enabled. + * + * Note: U-APSD client mode is not yet supported with + * %IEEE80211_HW_PS_NULLFUNC_STACK. + */ + +/** + * DOC: Beacon filter support + * + * Some hardware have beacon filter support to reduce host cpu wakeups + * which will reduce system power consumption. It usually works so that + * the firmware creates a checksum of the beacon but omits all constantly + * changing elements (TSF, TIM etc). Whenever the checksum changes the + * beacon is forwarded to the host, otherwise it will be just dropped. That + * way the host will only receive beacons where some relevant information + * (for example ERP protection or WMM settings) have changed. + * + * Beacon filter support is advertised with the %IEEE80211_VIF_BEACON_FILTER + * interface capability. The driver needs to enable beacon filter support + * whenever power save is enabled, that is %IEEE80211_CONF_PS is set. When + * power save is enabled, the stack will not check for beacon loss and the + * driver needs to notify about loss of beacons with ieee80211_beacon_loss(). + * + * The time (or number of beacons missed) until the firmware notifies the + * driver of a beacon loss event (which in turn causes the driver to call + * ieee80211_beacon_loss()) should be configurable and will be controlled + * by mac80211 and the roaming algorithm in the future. + * + * Since there may be constantly changing information elements that nothing + * in the software stack cares about, we will, in the future, have mac80211 + * tell the driver which information elements are interesting in the sense + * that we want to see changes in them. This will include + * - a list of information element IDs + * - a list of OUIs for the vendor information element + * + * Ideally, the hardware would filter out any beacons without changes in the + * requested elements, but if it cannot support that it may, at the expense + * of some efficiency, filter out only a subset. For example, if the device + * doesn't support checking for OUIs it should pass up all changes in all + * vendor information elements. + * + * Note that change, for the sake of simplification, also includes information + * elements appearing or disappearing from the beacon. + * + * Some hardware supports an "ignore list" instead, just make sure nothing + * that was requested is on the ignore list, and include commonly changing + * information element IDs in the ignore list, for example 11 (BSS load) and + * the various vendor-assigned IEs with unknown contents (128, 129, 133-136, + * 149, 150, 155, 156, 173, 176, 178, 179, 219); for forward compatibility + * it could also include some currently unused IDs. + * + * + * In addition to these capabilities, hardware should support notifying the + * host of changes in the beacon RSSI. This is relevant to implement roaming + * when no traffic is flowing (when traffic is flowing we see the RSSI of + * the received data packets). 
This can consist in notifying the host when + * the RSSI changes significantly or when it drops below or rises above + * configurable thresholds. In the future these thresholds will also be + * configured by mac80211 (which gets them from userspace) to implement + * them as the roaming algorithm requires. + * + * If the hardware cannot implement this, the driver should ask it to + * periodically pass beacon frames to the host so that software can do the + * signal strength threshold checking. + */ + +/** + * DOC: Spatial multiplexing power save + * + * SMPS (Spatial multiplexing power save) is a mechanism to conserve + * power in an 802.11n implementation. For details on the mechanism + * and rationale, please refer to 802.11 (as amended by 802.11n-2009) + * "11.2.3 SM power save". + * + * The mac80211 implementation is capable of sending action frames + * to update the AP about the station's SMPS mode, and will instruct + * the driver to enter the specific mode. It will also announce the + * requested SMPS mode during the association handshake. Hardware + * support for this feature is required, and can be indicated by + * hardware flags. + * + * The default mode will be "automatic", which nl80211/cfg80211 + * defines to be dynamic SMPS in (regular) powersave, and SMPS + * turned off otherwise. + * + * To support this feature, the driver must set the appropriate + * hardware support flags, and handle the SMPS flag to the config() + * operation. It will then with this mechanism be instructed to + * enter the requested SMPS mode while associated to an HT AP. */ /** @@ -903,18 +2046,183 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) * the driver's configure_filter() function which frames should be * passed to mac80211 and which should be filtered out. * - * The configure_filter() callback is invoked with the parameters - * @mc_count and @mc_list for the combined multicast address list - * of all virtual interfaces, @changed_flags telling which flags - * were changed and @total_flags with the new flag states. + * Before configure_filter() is invoked, the prepare_multicast() + * callback is invoked with the parameters @mc_count and @mc_list + * for the combined multicast address list of all virtual interfaces. + * It's use is optional, and it returns a u64 that is passed to + * configure_filter(). Additionally, configure_filter() has the + * arguments @changed_flags telling which flags were changed and + * @total_flags with the new flag states. * * If your device has no multicast address filters your driver will * need to check both the %FIF_ALLMULTI flag and the @mc_count * parameter to see whether multicast frames should be accepted * or dropped. * - * All unsupported flags in @total_flags must be cleared, i.e. you - * should clear all bits except those you honoured. + * All unsupported flags in @total_flags must be cleared. + * Hardware does not support a flag if it is incapable of _passing_ + * the frame to the stack. Otherwise the driver must ignore + * the flag, but not clear it. + * You must _only_ clear the flag (announce no support for the + * flag to mac80211) if you are not able to pass the packet type + * to the stack (so the hardware always filters it). + * So for example, you should clear @FIF_CONTROL, if your hardware + * always filters control frames. If your hardware always passes + * control frames to the kernel and is incapable of filtering them, + * you do _not_ clear the @FIF_CONTROL flag. 
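To make the @FIF_CONTROL case above concrete, a hedged sketch (editorial addition) of a configure_filter() implementation for hypothetical hardware that always filters control frames (so the flag is cleared) but can be told to pass frames from other BSSes (so @FIF_OTHER_BSS is left in place and honoured); foo_hw_set_rx_filter() is a placeholder.

#include <net/mac80211.h>

struct foo_priv;	/* hypothetical driver state, hw->priv */
static void foo_hw_set_rx_filter(struct foo_priv *priv, unsigned int flags,
				 u64 multicast);

static void foo_configure_filter(struct ieee80211_hw *hw,
				 unsigned int changed_flags,
				 unsigned int *total_flags,
				 u64 multicast)
{
	struct foo_priv *priv = hw->priv;

	/* this hardware can never pass control frames to the stack,
	 * so announce no support for FIF_CONTROL by clearing it */
	*total_flags &= ~FIF_CONTROL;

	/* everything left in *total_flags is programmed into the RX filter;
	 * @changed_flags could be used to skip reprogramming when nothing
	 * relevant changed (not done in this sketch) */
	foo_hw_set_rx_filter(priv, *total_flags, multicast);	/* hypothetical */
}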
+ * This rule applies to all other FIF flags as well. + */ + +/** + * DOC: AP support for powersaving clients + * + * In order to implement AP and P2P GO modes, mac80211 has support for + * client powersaving, both "legacy" PS (PS-Poll/null data) and uAPSD. + * There currently is no support for sAPSD. + * + * There is one assumption that mac80211 makes, namely that a client + * will not poll with PS-Poll and trigger with uAPSD at the same time. + * Both are supported, and both can be used by the same client, but + * they can't be used concurrently by the same client. This simplifies + * the driver code. + * + * The first thing to keep in mind is that there is a flag for complete + * driver implementation: %IEEE80211_HW_AP_LINK_PS. If this flag is set, + * mac80211 expects the driver to handle most of the state machine for + * powersaving clients and will ignore the PM bit in incoming frames. + * Drivers then use ieee80211_sta_ps_transition() to inform mac80211 of + * stations' powersave transitions. In this mode, mac80211 also doesn't + * handle PS-Poll/uAPSD. + * + * In the mode without %IEEE80211_HW_AP_LINK_PS, mac80211 will check the + * PM bit in incoming frames for client powersave transitions. When a + * station goes to sleep, we will stop transmitting to it. There is, + * however, a race condition: a station might go to sleep while there is + * data buffered on hardware queues. If the device has support for this + * it will reject frames, and the driver should give the frames back to + * mac80211 with the %IEEE80211_TX_STAT_TX_FILTERED flag set which will + * cause mac80211 to retry the frame when the station wakes up. The + * driver is also notified of powersave transitions by calling its + * @sta_notify callback. + * + * When the station is asleep, it has three choices: it can wake up, + * it can PS-Poll, or it can possibly start a uAPSD service period. + * Waking up is implemented by simply transmitting all buffered (and + * filtered) frames to the station. This is the easiest case. When + * the station sends a PS-Poll or a uAPSD trigger frame, mac80211 + * will inform the driver of this with the @allow_buffered_frames + * callback; this callback is optional. mac80211 will then transmit + * the frames as usual and set the %IEEE80211_TX_CTL_NO_PS_BUFFER + * on each frame. The last frame in the service period (or the only + * response to a PS-Poll) also has %IEEE80211_TX_STATUS_EOSP set to + * indicate that it ends the service period; as this frame must have + * TX status report it also sets %IEEE80211_TX_CTL_REQ_TX_STATUS. + * When TX status is reported for this frame, the service period is + * marked has having ended and a new one can be started by the peer. + * + * Additionally, non-bufferable MMPDUs can also be transmitted by + * mac80211 with the %IEEE80211_TX_CTL_NO_PS_BUFFER set in them. + * + * Another race condition can happen on some devices like iwlwifi + * when there are frames queued for the station and it wakes up + * or polls; the frames that are already queued could end up being + * transmitted first instead, causing reordering and/or wrong + * processing of the EOSP. The cause is that allowing frames to be + * transmitted to a certain station is out-of-band communication to + * the device. To allow this problem to be solved, the driver can + * call ieee80211_sta_block_awake() if frames are buffered when it + * is notified that the station went to sleep. 
When all these frames + * have been filtered (see above), it must call the function again + * to indicate that the station is no longer blocked. + * + * If the driver buffers frames in the driver for aggregation in any + * way, it must use the ieee80211_sta_set_buffered() call when it is + * notified of the station going to sleep to inform mac80211 of any + * TIDs that have frames buffered. Note that when a station wakes up + * this information is reset (hence the requirement to call it when + * informed of the station going to sleep). Then, when a service + * period starts for any reason, @release_buffered_frames is called + * with the number of frames to be released and which TIDs they are + * to come from. In this case, the driver is responsible for setting + * the EOSP (for uAPSD) and MORE_DATA bits in the released frames, + * to help the @more_data parameter is passed to tell the driver if + * there is more data on other TIDs -- the TIDs to release frames + * from are ignored since mac80211 doesn't know how many frames the + * buffers for those TIDs contain. + * + * If the driver also implement GO mode, where absence periods may + * shorten service periods (or abort PS-Poll responses), it must + * filter those response frames except in the case of frames that + * are buffered in the driver -- those must remain buffered to avoid + * reordering. Because it is possible that no frames are released + * in this case, the driver must call ieee80211_sta_eosp() + * to indicate to mac80211 that the service period ended anyway. + * + * Finally, if frames from multiple TIDs are released from mac80211 + * but the driver might reorder them, it must clear & set the flags + * appropriately (only the last frame may have %IEEE80211_TX_STATUS_EOSP) + * and also take care of the EOSP and MORE_DATA bits in the frame. + * The driver may also use ieee80211_sta_eosp() in this case. + * + * Note that if the driver ever buffers frames other than QoS-data + * frames, it must take care to never send a non-QoS-data frame as + * the last frame in a service period, adding a QoS-nulldata frame + * after a non-QoS-data frame if needed. + */ + +/** + * DOC: HW queue control + * + * Before HW queue control was introduced, mac80211 only had a single static + * assignment of per-interface AC software queues to hardware queues. This + * was problematic for a few reasons: + * 1) off-channel transmissions might get stuck behind other frames + * 2) multiple virtual interfaces couldn't be handled correctly + * 3) after-DTIM frames could get stuck behind other frames + * + * To solve this, hardware typically uses multiple different queues for all + * the different usages, and this needs to be propagated into mac80211 so it + * won't have the same problem with the software queues. + * + * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability + * flag that tells it that the driver implements its own queue control. To do + * so, the driver will set up the various queues in each &struct ieee80211_vif + * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will + * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and + * if necessary will queue the frame on the right software queue that mirrors + * the hardware queue. + * Additionally, the driver has to then use these HW queue IDs for the queue + * management functions (ieee80211_stop_queue() et al.) 
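A hedged sketch (editorial addition) of how the driver-assigned HW queue IDs then show up in the TX path, via the hw_queue field of &struct ieee80211_tx_info mentioned above; foo_hw_queue_space() and foo_hw_enqueue() are hypothetical helpers.

#include <net/mac80211.h>

struct foo_priv;	/* hypothetical driver state, hw->priv */
static int foo_hw_queue_space(struct foo_priv *priv, int q);
static void foo_hw_enqueue(struct foo_priv *priv, int q, struct sk_buff *skb);

static void foo_tx(struct ieee80211_hw *hw,
		   struct ieee80211_tx_control *control,
		   struct sk_buff *skb)
{
	struct foo_priv *priv = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int q = info->hw_queue;		/* HW queue ID set up by the driver */

	if (foo_hw_queue_space(priv, q) <= 1)	/* hypothetical */
		ieee80211_stop_queue(hw, q);	/* takes the HW queue ID here */

	foo_hw_enqueue(priv, q, skb);		/* hypothetical */
}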
+ * + * The driver is free to set up the queue mappings as needed, multiple virtual + * interfaces may map to the same hardware queues if needed. The setup has to + * happen during add_interface or change_interface callbacks. For example, a + * driver supporting station+station and station+AP modes might decide to have + * 10 hardware queues to handle different scenarios: + * + * 4 AC HW queues for 1st vif: 0, 1, 2, 3 + * 4 AC HW queues for 2nd vif: 4, 5, 6, 7 + * after-DTIM queue for AP: 8 + * off-channel queue: 9 + * + * It would then set up the hardware like this: + * hw.offchannel_tx_hw_queue = 9 + * + * and the first virtual interface that is added as follows: + * vif.hw_queue[IEEE80211_AC_VO] = 0 + * vif.hw_queue[IEEE80211_AC_VI] = 1 + * vif.hw_queue[IEEE80211_AC_BE] = 2 + * vif.hw_queue[IEEE80211_AC_BK] = 3 + * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE + * and the second virtual interface with 4-7. + * + * If queue 6 gets full, for example, mac80211 would only stop the second + * virtual interface's BE queue since virtual interface queues are per AC. + * + * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE + * whenever the queue is not used (i.e. the interface is not in AP mode) if the + * queue could potentially be shared since mac80211 will look at cab_queue when + * a queue is stopped/woken even if the interface is not in AP mode. */ /** @@ -945,10 +2253,15 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) * mac80211 needs to do and the amount of CPU wakeups, so you should * honour this flag if possible. * - * @FIF_CONTROL: pass control frames, if PROMISC_IN_BSS is not set then - * only those addressed to this station + * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS + * is not set then only those addressed to this station. * * @FIF_OTHER_BSS: pass frames destined to other BSSes + * + * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only + * those addressed to this station. + * + * @FIF_PROBE_REQ: pass probe request frames */ enum ieee80211_filter_flags { FIF_PROMISC_IN_BSS = 1<<0, @@ -958,6 +2271,8 @@ enum ieee80211_filter_flags { FIF_BCN_PRBRESP_PROMISC = 1<<4, FIF_CONTROL = 1<<5, FIF_OTHER_BSS = 1<<6, + FIF_PSPOLL = 1<<7, + FIF_PROBE_REQ = 1<<8, }; /** @@ -965,12 +2280,86 @@ enum ieee80211_filter_flags { * * These flags are used with the ampdu_action() callback in * &struct ieee80211_ops to indicate which action is needed. - * @IEEE80211_AMPDU_RX_START: start Rx aggregation - * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation + * + * Note that drivers MUST be able to deal with a TX aggregation + * session being stopped even before they OK'ed starting it by + * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer + * might receive the addBA frame and send a delBA right away! + * + * @IEEE80211_AMPDU_RX_START: start RX aggregation + * @IEEE80211_AMPDU_RX_STOP: stop RX aggregation + * @IEEE80211_AMPDU_TX_START: start TX aggregation + * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational + * @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting + * queued packets, now unaggregated. After all packets are transmitted the + * driver has to call ieee80211_stop_tx_ba_cb_irqsafe(). + * @IEEE80211_AMPDU_TX_STOP_FLUSH: stop TX aggregation and flush all packets, + * called when the station is removed. 
There's no need or reason to call + * ieee80211_stop_tx_ba_cb_irqsafe() in this case as mac80211 assumes the + * session is gone and removes the station. + * @IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: called when TX aggregation is stopped + * but the driver hasn't called ieee80211_stop_tx_ba_cb_irqsafe() yet and + * now the connection is dropped and the station will be removed. Drivers + * should clean up and drop remaining packets when this is called. */ enum ieee80211_ampdu_mlme_action { IEEE80211_AMPDU_RX_START, IEEE80211_AMPDU_RX_STOP, + IEEE80211_AMPDU_TX_START, + IEEE80211_AMPDU_TX_STOP_CONT, + IEEE80211_AMPDU_TX_STOP_FLUSH, + IEEE80211_AMPDU_TX_STOP_FLUSH_CONT, + IEEE80211_AMPDU_TX_OPERATIONAL, +}; + +/** + * enum ieee80211_frame_release_type - frame release reason + * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll + * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to + * frame received on trigger-enabled AC + */ +enum ieee80211_frame_release_type { + IEEE80211_FRAME_RELEASE_PSPOLL, + IEEE80211_FRAME_RELEASE_UAPSD, +}; + +/** + * enum ieee80211_rate_control_changed - flags to indicate what changed + * + * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit + * to this station changed. The actual bandwidth is in the station + * information -- for HT20/40 the IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * flag changes, for HT and VHT the bandwidth field changes. + * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed. + * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer + * changed (in IBSS mode) due to discovering more information about + * the peer. + * @IEEE80211_RC_NSS_CHANGED: N_SS (number of spatial streams) was changed + * by the peer + */ +enum ieee80211_rate_control_changed { + IEEE80211_RC_BW_CHANGED = BIT(0), + IEEE80211_RC_SMPS_CHANGED = BIT(1), + IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2), + IEEE80211_RC_NSS_CHANGED = BIT(3), +}; + +/** + * enum ieee80211_roc_type - remain on channel type + * + * With the support for multi channel contexts and multi channel operations, + * remain on channel operations might be limited/deferred/aborted by other + * flows/operations which have higher priority (and vice versa). + * Specifying the ROC type can be used by devices to prioritize the ROC + * operations compared to other operations/flows. + * + * @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC. + * @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required + * for sending management frames offchannel. + */ +enum ieee80211_roc_type { + IEEE80211_ROC_TYPE_NORMAL = 0, + IEEE80211_ROC_TYPE_MGMT_TX, }; /** @@ -983,8 +2372,9 @@ enum ieee80211_ampdu_mlme_action { * @tx: Handler that 802.11 module calls for each transmitted frame. * skb contains the buffer starting from the IEEE 802.11 header. * The low-level driver should send the frame out based on - * configuration in the TX control data. Must be implemented and - * atomic. + * configuration in the TX control data. This handler should, + * preferably, never fail and stop queues appropriately. + * Must be atomic. * * @start: Called before the first netdevice attached to the hardware * is enabled. This should turn on the hardware and must turn on @@ -994,24 +2384,52 @@ enum ieee80211_ampdu_mlme_action { * When the device is started it should not have a MAC address * to avoid acknowledging frames before a non-monitor device * is added. - * Must be implemented and can sleep.
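Purely as an illustration of the A-MPDU actions listed in &enum ieee80211_ampdu_mlme_action above, an ampdu_action callback usually ends up as a switch of roughly this shape; the foo_* aggregation helpers are hypothetical driver internals:

        static int foo_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    enum ieee80211_ampdu_mlme_action action,
                                    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
                                    u8 buf_size)
        {
                switch (action) {
                case IEEE80211_AMPDU_RX_START:
                        return foo_rx_agg_start(hw, sta, tid, *ssn);
                case IEEE80211_AMPDU_RX_STOP:
                        return foo_rx_agg_stop(hw, sta, tid);
                case IEEE80211_AMPDU_TX_START:
                        /* prepare, then tell mac80211 it may send the addBA */
                        ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                        return 0;
                case IEEE80211_AMPDU_TX_OPERATIONAL:
                        /* honour the peer's reorder buffer size from now on */
                        return foo_tx_agg_oper(hw, sta, tid, buf_size);
                case IEEE80211_AMPDU_TX_STOP_CONT:
                        /* keep sending the leftover frames unaggregated;
                         * ieee80211_stop_tx_ba_cb_irqsafe() is called from the
                         * (hypothetical) TX-done path once they are all out */
                        foo_tx_agg_stop(hw, sta, tid);
                        return 0;
                case IEEE80211_AMPDU_TX_STOP_FLUSH:
                case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
                        /* station is going away, no _cb_irqsafe() call needed */
                        return foo_tx_agg_flush(hw, sta, tid);
                }
                return -EOPNOTSUPP;
        }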
* * @stop: Called after last netdevice attached to the hardware * is disabled. This should turn off the hardware (at least * it must turn off frame reception.) * May be called right after add_interface if that rejects - * an interface. - * Must be implemented. + * an interface. If you added any work onto the mac80211 workqueue + * you should ensure to cancel it on this callback. + * Must be implemented and can sleep. + * + * @suspend: Suspend the device; mac80211 itself will quiesce before and + * stop transmitting and doing any other configuration, and then + * ask the device to suspend. This is only invoked when WoWLAN is + * configured, otherwise the device is deconfigured completely and + * reconfigured at resume time. + * The driver may also impose special conditions under which it + * wants to use the "normal" suspend (deconfigure), say if it only + * supports WoWLAN when the device is associated. In this case, it + * must return 1 from this function. + * + * @resume: If WoWLAN was configured, this indicates that mac80211 is + * now resuming its operation, after this the device must be fully + * functional again. If this returns an error, the only way out is + * to also unregister the device. If it returns 1, then mac80211 + * will also go through the regular complete restart on resume. + * + * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is + * modified. The reason is that device_set_wakeup_enable() is + * supposed to be called when the configuration changes, not only + * in suspend(). * * @add_interface: Called when a netdevice attached to the hardware is - * enabled. Because it is not called for monitor mode devices, @open + * enabled. Because it is not called for monitor mode devices, @start * and @stop must be implemented. * The driver should perform any initialization it needs before * the device can be enabled. The initial configuration for the * interface is given in the conf parameter. * The callback may refuse to add an interface by returning a * negative error code (which will be seen in userspace.) - * Must be implemented. + * Must be implemented and can sleep. + * + * @change_interface: Called when a netdevice changes type. This callback + * is optional, but only if it is supported can interface types be + * switched while the interface is UP. The callback may sleep. + * Note that while an interface is being switched, it will not be + * found by the interface iteration callbacks. * * @remove_interface: Notifies a driver that an interface is going down. * The @stop callback is called after this if it is the last interface @@ -1020,149 +2438,581 @@ enum ieee80211_ampdu_mlme_action { * must be cleared so the device no longer acknowledges packets, * the mac_addr member of the conf structure is, however, set to the * MAC address of the device going away. - * Hence, this callback must be implemented. + * Hence, this callback must be implemented. It can sleep. * * @config: Handler for configuration requests. IEEE 802.11 code calls this * function to change hardware configuration, e.g., channel. - * - * @config_interface: Handler for configuration requests related to interfaces - * (e.g. BSSID changes.) + * This function should never fail but returns a negative error code + * if it does. The callback can sleep. * * @bss_info_changed: Handler for configuration requests related to BSS * parameters that may vary during BSS's lifespan, and may affect low * level driver (e.g. assoc/disassoc status, erp parameters). 
* This function should not be used if no BSS has been set, unless * for association indication. The @changed parameter indicates which - * of the bss parameters has changed when a call is made. This callback - * has to be atomic. + * of the bss parameters has changed when a call is made. The callback + * can sleep. + * + * @prepare_multicast: Prepare for multicast filter configuration. + * This callback is optional, and its return value is passed + * to configure_filter(). This callback must be atomic. * * @configure_filter: Configure the device's RX filter. * See the section "Frame filtering" for more information. - * This callback must be implemented and atomic. + * This callback must be implemented and can sleep. * - * @set_tim: Set TIM bit. If the hardware/firmware takes care of beacon - * generation (that is, %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is set) - * mac80211 calls this function when a TIM bit must be set or cleared - * for a given AID. Must be atomic. + * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit + * must be set or cleared for a given STA. Must be atomic. * * @set_key: See the section "Hardware crypto acceleration" - * This callback can sleep, and is only called between add_interface - * and remove_interface calls, i.e. while the interface with the - * given local_address is enabled. + * This callback is only called between add_interface and + * remove_interface calls, i.e. while the given virtual interface + * is enabled. + * Returns a negative error code if the key can't be added. + * The callback can sleep. + * + * @update_tkip_key: See the section "Hardware crypto acceleration" + * This callback will be called in the context of Rx. Called for drivers + * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. + * The callback must be atomic. + * + * @set_rekey_data: If the device supports GTK rekeying, for example while the + * host is suspended, it can assign this callback to retrieve the data + * necessary to do GTK rekeying, this is the KEK, KCK and replay counter. + * After rekeying was done it should (for example during resume) notify + * userspace of the new replay counter using ieee80211_gtk_rekey_notify(). + * + * @set_default_unicast_key: Set the default (unicast) key index, useful for + * WEP when the device sends data packets autonomously, e.g. for ARP + * offloading. The index can be 0-3, or -1 for unsetting it. * * @hw_scan: Ask the hardware to service the scan request, no need to start - * the scan state machine in stack. - * - * @get_stats: return low-level statistics + * the scan state machine in stack. The scan must honour the channel + * configuration done by the regulatory agent in the wiphy's + * registered bands. The hardware (or the driver) needs to make sure + * that power save is disabled. + * The @req ie/ie_len members are rewritten by mac80211 to contain the + * entire IEs after the SSID, so that drivers need not look at these + * at all but just send them after the SSID -- mac80211 includes the + * (extended) supported rates and HT information (where applicable). + * When the scan finishes, ieee80211_scan_completed() must be called; + * note that it also must be called when the scan cannot finish due to + * any error unless this callback returned a negative error code. + * The callback can sleep. + * + * @cancel_hw_scan: Ask the low-level driver to cancel the active hw scan.
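To sketch the @hw_scan contract described above (the firmware interface, private data and scan-done event are hypothetical; only the mac80211/cfg80211 symbols are real):

        static int foo_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                               struct cfg80211_scan_request *req)
        {
                struct foo_priv *priv = hw->priv;
                int ret;

                /* req->ie/req->ie_len already hold everything to send after
                 * each SSID, as rewritten by mac80211 */
                ret = foo_fw_start_scan(priv, req);
                if (ret)
                        return ret; /* on error, no ieee80211_scan_completed() */

                priv->scan_vif = vif;
                return 0;
        }

        /* later, from the firmware's scan-done event: */
        static void foo_scan_done(struct foo_priv *priv, bool aborted)
        {
                ieee80211_scan_completed(priv->hw, aborted);
        }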
+ * The driver should ask the hardware to cancel the scan (if possible), + * but the scan will be completed only after the driver will call + * ieee80211_scan_completed(). + * This callback is needed for wowlan, to prevent enqueueing a new + * scan_work after the low-level driver was already suspended. + * The callback can sleep. + * + * @sched_scan_start: Ask the hardware to start scanning repeatedly at + * specific intervals. The driver must call the + * ieee80211_sched_scan_results() function whenever it finds results. + * This process will continue until sched_scan_stop is called. + * + * @sched_scan_stop: Tell the hardware to stop an ongoing scheduled scan. + * In this case, ieee80211_sched_scan_stopped() must not be called. + * + * @sw_scan_start: Notifier function that is called just before a software scan + * is started. Can be NULL, if the driver doesn't need this notification. + * The callback can sleep. + * + * @sw_scan_complete: Notifier function that is called just after a + * software scan finished. Can be NULL, if the driver doesn't need + * this notification. + * The callback can sleep. + * + * @get_stats: Return low-level statistics. + * Returns zero if statistics are available. + * The callback can sleep. * * @get_tkip_seq: If your device implements TKIP encryption in hardware this * callback should be provided to read the TKIP transmit IVs (both IV32 * and IV16) for the given key from hardware. + * The callback must be atomic. * - * @set_rts_threshold: Configuration of RTS threshold (if device needs it) - * - * @set_frag_threshold: Configuration of fragmentation threshold. Assign this if - * the device does fragmentation by itself; if this method is assigned then - * the stack will not do fragmentation. - * - * @set_retry_limit: Configuration of retry limits (if device needs it) + * @set_frag_threshold: Configuration of fragmentation threshold. Assign this + * if the device does fragmentation by itself; if this callback is + * implemented then the stack will not do fragmentation. + * The callback can sleep. * - * @sta_notify: Notifies low level driver about addition or removal - * of assocaited station or AP. + * @set_rts_threshold: Configuration of RTS threshold (if device needs it) + * The callback can sleep. + * + * @sta_add: Notifies low level driver about addition of an associated station, + * AP, IBSS/WDS/mesh peer etc. This callback can sleep. + * + * @sta_remove: Notifies low level driver about removal of an associated + * station, AP, IBSS/WDS/mesh peer etc. Note that after the callback + * returns it isn't safe to use the pointer, not even RCU protected; + * no RCU grace period is guaranteed between returning here and freeing + * the station. See @sta_pre_rcu_remove if needed. + * This callback can sleep. + * + * @sta_add_debugfs: Drivers can use this callback to add debugfs files + * when a station is added to mac80211's station list. This callback + * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS + * conditional. This callback can sleep. + * + * @sta_remove_debugfs: Remove the debugfs files which were added using + * @sta_add_debugfs. This callback can sleep. + * + * @sta_notify: Notifies low level driver about power state transition of an + * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating + * in AP mode, this callback will not be called when the flag + * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic. 
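The @sta_add/@sta_remove pair documented above typically just mirrors the station into the device; a minimal sketch, keeping per-station context in sta->drv_priv (the foo_* names and firmware calls are hypothetical):

        struct foo_sta {
                u8 fw_sta_id;
        };

        static int foo_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                               struct ieee80211_sta *sta)
        {
                /* assumes hw->sta_data_size was set to sizeof(struct foo_sta)
                 * before ieee80211_register_hw() */
                struct foo_sta *fsta = (void *)sta->drv_priv;

                return foo_fw_add_station(hw->priv, vif, sta->addr,
                                          &fsta->fw_sta_id);
        }

        static int foo_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                                  struct ieee80211_sta *sta)
        {
                struct foo_sta *fsta = (void *)sta->drv_priv;

                /* after returning, the sta pointer must no longer be used */
                return foo_fw_del_station(hw->priv, fsta->fw_sta_id);
        }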
+ * + * @sta_state: Notifies low level driver about state transition of a + * station (which can be the AP, a client, IBSS/WDS/mesh peer etc.) + * This callback is mutually exclusive with @sta_add/@sta_remove. + * It must not fail for down transitions but may fail for transitions + * up the list of states. Also note that after the callback returns it + * isn't safe to use the pointer, not even RCU protected - no RCU grace + * period is guaranteed between returning here and freeing the station. + * See @sta_pre_rcu_remove if needed. + * The callback can sleep. + * + * @sta_pre_rcu_remove: Notify driver about station removal before RCU + * synchronisation. This is useful if a driver needs to have station + * pointers protected using RCU, it can then use this call to clear + * the pointers instead of waiting for an RCU grace period to elapse + * in @sta_state. + * The callback can sleep. + * + * @sta_rc_update: Notifies the driver of changes to the bitrates that can be + * used to transmit to the station. The changes are advertised with bits + * from &enum ieee80211_rate_control_changed and the values are reflected + * in the station data. This callback should only be used when the driver + * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since + * otherwise the rate control algorithm is notified directly. + * Must be atomic. * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), - * bursting) for a hardware TX queue. The @queue parameter uses the - * %IEEE80211_TX_QUEUE_* constants. Must be atomic. - * - * @get_tx_stats: Get statistics of the current TX queue status. This is used - * to get number of currently queued packets (queue length), maximum queue - * size (limit), and total number of packets sent using each TX queue - * (count). This information is used for WMM to find out which TX - * queues have room for more packets and by hostapd to provide - * statistics about the current queueing state to external programs. + * bursting) for a hardware TX queue. + * Returns a negative error code on failure. + * The callback can sleep. * * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, - * this is only used for IBSS mode debugging and, as such, is not a - * required function. Must be atomic. + * this is only used for IBSS mode BSSID merging and debugging. Is not a + * required function. + * The callback can sleep. + * + * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware. + * Currently, this is only used for IBSS mode debugging. Is not a + * required function. + * The callback can sleep. * * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize * with other STAs in the IBSS. This is only used in IBSS mode. This * function is optional if the firmware/hardware takes full care of * TSF synchronization. - * - * @beacon_update: Setup beacon data for IBSS beacons. Unlike access point, - * IBSS uses a fixed beacon frame which is configured using this - * function. - * If the driver returns success (0) from this callback, it owns - * the skb. That means the driver is responsible to kfree_skb() it. - * The control structure is not dynamically allocated. That means the - * driver does not own the pointer and if it needs it somewhere - * outside of the context of this function, it must copy it - * somewhere else. - * This handler is required only for IBSS mode. + * The callback can sleep. * * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. 
* This is needed only for IBSS mode and the result of this function is * used to determine whether to reply to Probe Requests. - * - * @conf_ht: Configures low level driver with 802.11n HT data. Must be atomic. + * Returns non-zero if this device sent the last beacon. + * The callback can sleep. * * @ampdu_action: Perform a certain A-MPDU action * The RA/TID combination determines the destination and TID we want * the ampdu action to be performed for. The action is defined through * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) - * is the first frame we expect to perform the action on. + * is the first frame we expect to perform the action on. Notice + * that TX/RX_STOP can pass NULL for this parameter. + * The @buf_size parameter is only valid when the action is set to + * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder + * buffer size (number of subframes) for this session -- the driver + * may neither send aggregates containing more subframes than this + * nor send aggregates in a way that lost frames would exceed the + * buffer size. If just limiting the aggregate size, this would be + * possible with a buf_size of 8: + * - TX: 1.....7 + * - RX: 2....7 (lost frame #1) + * - TX: 8..1... + * which is invalid since #1 was now re-transmitted well past the + * buffer size of 8. Correct ways to retransmit #1 would be: + * - TX: 1 or 18 or 81 + * Even "189" would be wrong since 1 could be lost again. + * + * Returns a negative error code on failure. + * The callback can sleep. + * + * @get_survey: Return per-channel survey information + * + * @rfkill_poll: Poll rfkill hardware state. If you need this, you also + * need to set wiphy->rfkill_poll to %true before registration, + * and need to call wiphy_rfkill_set_hw_state() in the callback. + * The callback can sleep. + * + * @set_coverage_class: Set slot time for given coverage class as specified + * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout + * accordingly. This callback is not required and may sleep. + * + * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may + * be %NULL. The callback can sleep. + * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep. + * + * @flush: Flush all pending frames from the hardware queue, making sure + * that the hardware queues are empty. The @queues parameter is a bitmap + * of queues to flush, which is useful if different virtual interfaces + * use different hardware queues; it may also indicate all queues. + * If the parameter @drop is set to %true, pending frames may be dropped. + * Note that vif can be NULL. + * The callback can sleep. + * + * @channel_switch: Drivers that need (or want) to offload the channel + * switch operation for CSAs received from the AP may implement this + * callback. They must then call ieee80211_chswitch_done() to indicate + * completion of the channel switch. + * + * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. + * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may + * reject TX/RX mask combinations they cannot support by returning -EINVAL + * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX). + * + * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant). + * + * @remain_on_channel: Starts an off-channel period on the given channel, must + * call back to ieee80211_ready_on_channel() when on that channel. Note + * that normal channel traffic is not stopped as this is intended for hw + * offload. 
Frames to transmit on the off-channel channel are transmitted + * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the + * duration (which will always be non-zero) expires, the driver must call + * ieee80211_remain_on_channel_expired(). + * Note that this callback may be called while the device is in IDLE and + * must be accepted in this case. + * This callback may sleep. + * @cancel_remain_on_channel: Requests that an ongoing off-channel period is + * aborted before it expires. This callback may sleep. + * + * @set_ringparam: Set tx and rx ring sizes. + * + * @get_ringparam: Get tx and rx ring current and maximum sizes. + * + * @tx_frames_pending: Check if there is any pending frame in the hardware + * queues before entering power save. + * + * @set_bitrate_mask: Set a mask of rates to be used for rate control selection + * when transmitting a frame. Currently only legacy rates are handled. + * The callback can sleep. + * @rssi_callback: Notify driver when the average RSSI goes above/below + * thresholds that were registered previously. The callback can sleep. + * + * @release_buffered_frames: Release buffered frames according to the given + * parameters. In the case where the driver buffers some frames for + * sleeping stations mac80211 will use this callback to tell the driver + * to release some frames, either for PS-poll or uAPSD. + * Note that if the @more_data parameter is %false the driver must check + * if there are more frames on the given TIDs, and if there are more than + * the frames being released then it must still set the more-data bit in + * the frame. If the @more_data parameter is %true, then of course the + * more-data bit must always be set. + * The @tids parameter tells the driver which TIDs to release frames + * from, for PS-poll it will always have only a single bit set. + * In the case this is used for a PS-poll initiated release, the + * @num_frames parameter will always be 1 so code can be shared. In + * this case the driver must also set %IEEE80211_TX_STATUS_EOSP flag + * on the TX status (and must report TX status) so that the PS-poll + * period is properly ended. This is used to avoid sending multiple + * responses for a retried PS-poll frame. + * In the case this is used for uAPSD, the @num_frames parameter may be + * bigger than one, but the driver may send fewer frames (it must send + * at least one, however). In this case it is also responsible for + * setting the EOSP flag in the QoS header of the frames. Also, when the + * service period ends, the driver must set %IEEE80211_TX_STATUS_EOSP + * on the last frame in the SP. Alternatively, it may call the function + * ieee80211_sta_eosp() to inform mac80211 of the end of the SP. + * This callback must be atomic. + * @allow_buffered_frames: Prepare device to allow the given number of frames + * to go out to the given station. The frames will be sent by mac80211 + * via the usual TX path after this call. The TX information for frames + * released will also have the %IEEE80211_TX_CTL_NO_PS_BUFFER flag set + * and the last one will also have %IEEE80211_TX_STATUS_EOSP set. In case + * frames from multiple TIDs are released and the driver might reorder + * them between the TIDs, it must set the %IEEE80211_TX_STATUS_EOSP flag + * on the last frame and clear it on all others and also handle the EOSP + * bit in the QoS header correctly. Alternatively, it can also call the + * ieee80211_sta_eosp() function. 
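To illustrate the EOSP handling spelled out for @release_buffered_frames above (the dequeue and TX helpers are hypothetical; the mac80211 flags and ieee80211_sta_eosp() are real):

        static void foo_release_buffered_frames(struct ieee80211_hw *hw,
                                                struct ieee80211_sta *sta,
                                                u16 tids, int num_frames,
                                                enum ieee80211_frame_release_type reason,
                                                bool more_data)
        {
                struct sk_buff *skb;
                int sent = 0;

                while (sent < num_frames &&
                       (skb = foo_dequeue_buffered(hw->priv, sta, tids))) {
                        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

                        sent++;
                        /* the last frame of the service period carries EOSP and
                         * must get TX status so mac80211 can close the SP */
                        if (sent == num_frames)
                                info->flags |= IEEE80211_TX_STATUS_EOSP;
                        /* setting more_data and the QoS EOSP bit in the frame
                         * itself is also the driver's job here (not shown) */
                        foo_tx_frame(hw->priv, skb);
                }

                if (!sent)
                        /* nothing could be released (e.g. GO absence period);
                         * still tell mac80211 the service period ended */
                        ieee80211_sta_eosp(sta);
        }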
+ * The @tids parameter is a bitmap and tells the driver which TIDs the + * frames will be on; it will at most have two bits set. + * This callback must be atomic. + * + * @get_et_sset_count: Ethtool API to get string-set count. + * + * @get_et_stats: Ethtool API to get a set of u64 stats. + * + * @get_et_strings: Ethtool API to get a set of strings to describe stats + * and perhaps other supported types of ethtool data-sets. + * + * @get_rssi: Get current signal strength in dBm, the function is optional + * and can sleep. + * + * @mgd_prepare_tx: Prepare for transmitting a management frame for association + * before associated. In multi-channel scenarios, a virtual interface is + * bound to a channel before it is associated, but as it isn't associated + * yet it need not necessarily be given airtime, in particular since any + * transmission to a P2P GO needs to be synchronized against the GO's + * powersave state. mac80211 will call this function before transmitting a + * management frame prior to having successfully associated to allow the + * driver to give it channel time for the transmission, to get a response + * and to be able to synchronize with the GO. + * The callback will be called before each transmission and upon return + * mac80211 will transmit the frame right away. + * The callback is optional and can (should!) sleep. + * + * @add_chanctx: Notifies device driver about new channel context creation. + * @remove_chanctx: Notifies device driver about channel context destruction. + * @change_chanctx: Notifies device driver about channel context changes that + * may happen when combining different virtual interfaces on the same + * channel context with different settings + * @assign_vif_chanctx: Notifies device driver about channel context being bound + * to vif. Possible use is for hw queue remapping. + * @unassign_vif_chanctx: Notifies device driver about channel context being + * unbound from vif. + * @switch_vif_chanctx: switch a number of vifs from one chanctx to + * another, as specified in the list of + * @ieee80211_vif_chanctx_switch passed to the driver, according + * to the mode defined in &ieee80211_chanctx_switch_mode. + * + * @start_ap: Start operation on the AP interface, this is called after all the + * information in bss_conf is set and beacon can be retrieved. A channel + * context is bound before this is called. Note that if the driver uses + * software scan or ROC, this (and @stop_ap) isn't called when the AP is + * just "paused" for scanning/ROC, which is indicated by the beacon being + * disabled/enabled via @bss_info_changed. + * @stop_ap: Stop operation on the AP interface. + * + * @restart_complete: Called after a call to ieee80211_restart_hw(), when the + * reconfiguration has completed. This can help the driver implement the + * reconfiguration step. Also called when reconfiguring because the + * driver's resume function returned 1, as this is just like an "inline" + * hardware restart. This callback may sleep. + * + * @ipv6_addr_change: IPv6 address assignment on the given interface changed. + * Currently, this is only called for managed or P2P client interfaces. + * This callback is optional; it must not sleep. + * + * @channel_switch_beacon: Starts a channel switch to a new channel. + * Beacons are modified to include CSA or ECSA IEs before calling this + * function. The corresponding count fields in these IEs must be + * decremented, and when they reach 1 the driver must call + * ieee80211_csa_finish(). 
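A rough sketch of the host-generated-beacon case (the beacon-done interrupt and queueing helper are hypothetical; the mac80211 calls are real):

        /* called from the (hypothetical) beacon-done interrupt */
        static void foo_beacon_done(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif)
        {
                struct sk_buff *beacon;

                /* the CSA counter in the beacon that was just sent had already
                 * been decremented by ieee80211_beacon_get(); once it hits 1,
                 * tell mac80211 the switch can happen */
                if (ieee80211_csa_is_complete(vif))
                        ieee80211_csa_finish(vif);

                /* fetch and queue the beacon for the next TBTT */
                beacon = ieee80211_beacon_get(hw, vif);
                if (beacon)
                        foo_queue_beacon(hw->priv, beacon);
        }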
Drivers which use ieee80211_beacon_get() + * get the csa counter decremented by mac80211, but must check if it is + * 1 using ieee80211_csa_is_complete() after the beacon has been + * transmitted and then call ieee80211_csa_finish(). + * If the CSA count starts as zero or 1, this function will not be called, + * since there won't be any time to beacon before the switch anyway. + * + * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all + * information in bss_conf is set up and the beacon can be retrieved. A + * channel context is bound before this is called. + * @leave_ibss: Leave the IBSS again. + * + * @get_expected_throughput: extract the expected throughput towards the + * specified station. The returned value is expressed in Kbps. It returns 0 + * if the RC algorithm does not have proper data to provide. */ struct ieee80211_ops { - int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_tx_control *control); + void (*tx)(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb); int (*start)(struct ieee80211_hw *hw); void (*stop)(struct ieee80211_hw *hw); +#ifdef CONFIG_PM + int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); + int (*resume)(struct ieee80211_hw *hw); + void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled); +#endif int (*add_interface)(struct ieee80211_hw *hw, - struct ieee80211_if_init_conf *conf); - void (*remove_interface)(struct ieee80211_hw *hw, - struct ieee80211_if_init_conf *conf); - int (*config)(struct ieee80211_hw *hw, struct ieee80211_conf *conf); - int (*config_interface)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + int (*change_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_if_conf *conf); + enum nl80211_iftype new_type, bool p2p); + void (*remove_interface)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + int (*config)(struct ieee80211_hw *hw, u32 changed); void (*bss_info_changed)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed); + + int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + + u64 (*prepare_multicast)(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list); void (*configure_filter)(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, - int mc_count, struct dev_addr_list *mc_list); - int (*set_tim)(struct ieee80211_hw *hw, int aid, int set); + u64 multicast); + int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set); int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, - const u8 *local_address, const u8 *address, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); - int (*hw_scan)(struct ieee80211_hw *hw, u8 *ssid, size_t len); + void (*update_tkip_key)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *conf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); + void (*set_rekey_data)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data); + void (*set_default_unicast_key)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int idx); + int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); + void (*cancel_hw_scan)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + int (*sched_scan_start)(struct ieee80211_hw *hw, + struct 
ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies); + int (*sched_scan_stop)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + void (*sw_scan_start)(struct ieee80211_hw *hw); + void (*sw_scan_complete)(struct ieee80211_hw *hw); int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32, u16 *iv16); - int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value); - int (*set_retry_limit)(struct ieee80211_hw *hw, - u32 short_retry, u32 long_retr); + int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); + int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +#ifdef CONFIG_MAC80211_DEBUGFS + void (*sta_add_debugfs)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct dentry *dir); + void (*sta_remove_debugfs)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct dentry *dir); +#endif void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum sta_notify_cmd, const u8 *addr); - int (*conf_tx)(struct ieee80211_hw *hw, int queue, + enum sta_notify_cmd, struct ieee80211_sta *sta); + int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state); + void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void (*sta_rc_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed); + int (*conf_tx)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); - int (*get_tx_stats)(struct ieee80211_hw *hw, - struct ieee80211_tx_queue_stats *stats); - u64 (*get_tsf)(struct ieee80211_hw *hw); - void (*reset_tsf)(struct ieee80211_hw *hw); - int (*beacon_update)(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_tx_control *control); + u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 tsf); + void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*tx_last_beacon)(struct ieee80211_hw *hw); - int (*conf_ht)(struct ieee80211_hw *hw, struct ieee80211_conf *conf); int (*ampdu_action)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - const u8 *ra, u16 tid, u16 ssn); + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); + int (*get_survey)(struct ieee80211_hw *hw, int idx, + struct survey_info *survey); + void (*rfkill_poll)(struct ieee80211_hw *hw); + void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); +#ifdef CONFIG_NL80211_TESTMODE + int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len); +#endif + void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); + void (*channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch); + int (*set_antenna)(struct ieee80211_hw *hw, u32 
tx_ant, u32 rx_ant); + int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); + + int (*remain_on_channel)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type); + int (*cancel_remain_on_channel)(struct ieee80211_hw *hw); + int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx); + void (*get_ringparam)(struct ieee80211_hw *hw, + u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max); + bool (*tx_frames_pending)(struct ieee80211_hw *hw); + int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask); + void (*rssi_callback)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_rssi_event rssi_event); + + void (*allow_buffered_frames)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data); + void (*release_buffered_frames)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u16 tids, int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data); + + int (*get_et_sset_count)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset); + void (*get_et_stats)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); + void (*get_et_strings)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data); + int (*get_rssi)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, s8 *rssi_dbm); + + void (*mgd_prepare_tx)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + + int (*add_chanctx)(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx); + void (*remove_chanctx)(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx); + void (*change_chanctx)(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed); + int (*assign_vif_chanctx)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx); + void (*unassign_vif_chanctx)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx); + int (*switch_vif_chanctx)(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode); + + void (*restart_complete)(struct ieee80211_hw *hw); + +#if IS_ENABLED(CONFIG_IPV6) + void (*ipv6_addr_change)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev); +#endif + void (*channel_switch_beacon)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef); + + int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + u32 (*get_expected_throughput)(struct ieee80211_sta *sta); }; /** @@ -1176,6 +3026,8 @@ struct ieee80211_ops { * * @priv_data_len: length of private data * @ops: callbacks for this device + * + * Return: A pointer to the new hardware device, or %NULL on error. */ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops); @@ -1183,18 +3035,49 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, /** * ieee80211_register_hw - Register hardware device * - * You must call this function before any other functions - * except ieee80211_register_hwmode. + * You must call this function before any other functions in + * mac80211. 
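A bare-bones probe-time sequence tying ieee80211_alloc_hw() and ieee80211_register_hw() together; the foo_* driver scaffolding is hypothetical, and the wiphy/capability setup hinted at in the next sentence is folded into one helper:

        static const struct ieee80211_ops foo_ops = {
                .tx                  = foo_tx,
                .start               = foo_start,
                .stop                = foo_stop,
                .add_interface       = foo_add_interface,
                .remove_interface    = foo_remove_interface,
                .config              = foo_config,
                .configure_filter    = foo_configure_filter,
        };

        static int foo_probe(struct foo_device *dev)
        {
                struct ieee80211_hw *hw;

                hw = ieee80211_alloc_hw(sizeof(struct foo_priv), &foo_ops);
                if (!hw)
                        return -ENOMEM;

                /* fill the contained wiphy (bands, interface modes, ...) and
                 * the hw capabilities before registering */
                foo_setup_bands_and_caps(hw);
                SET_IEEE80211_PERM_ADDR(hw, dev->mac_addr);

                return ieee80211_register_hw(hw);
        }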
Note that before a hardware can be registered, you + * need to fill the contained wiphy's information. * * @hw: the device to register as returned by ieee80211_alloc_hw() + * + * Return: 0 on success. An error code otherwise. */ int ieee80211_register_hw(struct ieee80211_hw *hw); +/** + * struct ieee80211_tpt_blink - throughput blink description + * @throughput: throughput in Kbit/sec + * @blink_time: blink time in milliseconds + * (full cycle, ie. one off + one on period) + */ +struct ieee80211_tpt_blink { + int throughput; + int blink_time; +}; + +/** + * enum ieee80211_tpt_led_trigger_flags - throughput trigger flags + * @IEEE80211_TPT_LEDTRIG_FL_RADIO: enable blinking with radio + * @IEEE80211_TPT_LEDTRIG_FL_WORK: enable blinking when working + * @IEEE80211_TPT_LEDTRIG_FL_CONNECTED: enable blinking when at least one + * interface is connected in some way, including being an AP + */ +enum ieee80211_tpt_led_trigger_flags { + IEEE80211_TPT_LEDTRIG_FL_RADIO = BIT(0), + IEEE80211_TPT_LEDTRIG_FL_WORK = BIT(1), + IEEE80211_TPT_LEDTRIG_FL_CONNECTED = BIT(2), +}; + #ifdef CONFIG_MAC80211_LEDS -extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); +char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, + unsigned int flags, + const struct ieee80211_tpt_blink *blink_table, + unsigned int blink_table_len); #endif /** * ieee80211_get_tx_led_name - get name of TX LED @@ -1205,6 +3088,8 @@ extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); * of the trigger so you can automatically link the LED device. * * @hw: the hardware to get the LED trigger name for + * + * Return: The name of the LED trigger. %NULL if not configured for LEDs. */ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw) { @@ -1224,6 +3109,8 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw) * of the trigger so you can automatically link the LED device. * * @hw: the hardware to get the LED trigger name for + * + * Return: The name of the LED trigger. %NULL if not configured for LEDs. */ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) { @@ -1243,6 +3130,8 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) * of the trigger so you can automatically link the LED device. * * @hw: the hardware to get the LED trigger name for + * + * Return: The name of the LED trigger. %NULL if not configured for LEDs. */ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) { @@ -1262,6 +3151,8 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) * of the trigger so you can automatically link the LED device. * * @hw: the hardware to get the LED trigger name for + * + * Return: The name of the LED trigger. %NULL if not configured for LEDs. */ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw) { @@ -1272,9 +3163,30 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw) #endif } -/* Register a new hardware PHYMODE capability to the stack. 
*/ -int ieee80211_register_hwmode(struct ieee80211_hw *hw, - struct ieee80211_hw_mode *mode); +/** + * ieee80211_create_tpt_led_trigger - create throughput LED trigger + * @hw: the hardware to create the trigger for + * @flags: trigger flags, see &enum ieee80211_tpt_led_trigger_flags + * @blink_table: the blink table -- needs to be ordered by throughput + * @blink_table_len: size of the blink table + * + * Return: %NULL (in case of error, or if no LED triggers are + * configured) or the name of the new trigger. + * + * Note: This function must be called before ieee80211_register_hw(). + */ +static inline char * +ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags, + const struct ieee80211_tpt_blink *blink_table, + unsigned int blink_table_len) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_create_tpt_led_trigger(hw, flags, blink_table, + blink_table_len); +#else + return NULL; +#endif +} /** * ieee80211_unregister_hw - Unregister a hardware device @@ -1291,51 +3203,199 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw); * * This function frees everything that was allocated, including the * private data for the driver. You must call ieee80211_unregister_hw() - * before calling this function + * before calling this function. * * @hw: the hardware to free */ void ieee80211_free_hw(struct ieee80211_hw *hw); -/* trick to avoid symbol clashes with the ieee80211 subsystem */ -void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_rx_status *status); +/** + * ieee80211_restart_hw - restart hardware completely + * + * Call this function when the hardware was restarted for some reason + * (hardware error, ...) and the driver is unable to restore its state + * by itself. mac80211 assumes that at this point the driver/hardware + * is completely uninitialised and stopped, it starts the process by + * calling the ->start() operation. The driver will need to reset all + * internal state that it has prior to calling this function. + * + * @hw: the hardware to restart + */ +void ieee80211_restart_hw(struct ieee80211_hw *hw); + +/** + * ieee80211_napi_add - initialize mac80211 NAPI context + * @hw: the hardware to initialize the NAPI context on + * @napi: the NAPI context to initialize + * @napi_dev: dummy NAPI netdevice, here to not waste the space if the + * driver doesn't use NAPI + * @poll: poll function + * @weight: default weight + * + * See also netif_napi_add(). + */ +void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi, + struct net_device *napi_dev, + int (*poll)(struct napi_struct *, int), + int weight); /** * ieee80211_rx - receive frame * * Use this function to hand received frames to mac80211. The receive - * buffer in @skb must start with an IEEE 802.11 header or a radiotap - * header if %RX_FLAG_RADIOTAP is set in the @status flags. + * buffer in @skb must start with an IEEE 802.11 header. If a + * paged @skb is used, the driver is recommended to put the ieee80211 + * header of the frame on the linear part of the @skb to avoid memory + * allocation and/or memcpy by the stack. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. Calls to + * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be + * mixed for a single hardware. Must not run concurrently with + * ieee80211_tx_status() or ieee80211_tx_status_ni(). * - * This function may not be called in IRQ context.
+ * In process context, use ieee80211_rx_ni() instead. * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call - * @status: status of this frame; the status pointer need not be valid - * after this function returns */ -static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_rx_status *status) -{ - __ieee80211_rx(hw, skb, status); -} +void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb); /** * ieee80211_rx_irqsafe - receive frame * * Like ieee80211_rx() but can be called in IRQ context - * (internally defers to a workqueue.) + * (internally defers to a tasklet.) + * + * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not + * be mixed for a single hardware. Must not run concurrently with + * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + */ +void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); + +/** + * ieee80211_rx_ni - receive frame (in process context) + * + * Like ieee80211_rx() but can be called in process context + * (internally disables bottom halves). + * + * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may + * not be mixed for a single hardware. Must not run concurrently with + * ieee80211_tx_status() or ieee80211_tx_status_ni(). * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call - * @status: status of this frame; the status pointer need not be valid - * after this function returns and is not freed by mac80211, - * it is recommended that it points to a stack area */ -void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_rx_status *status); +static inline void ieee80211_rx_ni(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + local_bh_disable(); + ieee80211_rx(hw, skb); + local_bh_enable(); +} + +/** + * ieee80211_sta_ps_transition - PS transition for connected sta + * + * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS + * flag set, use this function to inform mac80211 about a connected station + * entering/leaving PS mode. + * + * This function may not be called in IRQ context or with softirqs enabled. + * + * Calls to this function for a single hardware must be synchronized against + * each other. + * + * @sta: currently connected sta + * @start: start or stop PS + * + * Return: 0 on success. -EINVAL when the requested PS mode is already set. + */ +int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start); + +/** + * ieee80211_sta_ps_transition_ni - PS transition for connected sta + * (in process context) + * + * Like ieee80211_sta_ps_transition() but can be called in process context + * (internally disables bottom halves). Concurrent call restriction still + * applies. + * + * @sta: currently connected sta + * @start: start or stop PS + * + * Return: Like ieee80211_sta_ps_transition(). + */ +static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta, + bool start) +{ + int ret; + + local_bh_disable(); + ret = ieee80211_sta_ps_transition(sta, start); + local_bh_enable(); + + return ret; +} + +/* + * The TX headroom reserved by mac80211 for its own tx_status functions. + * This is enough for the radiotap header.
+ */ +#define IEEE80211_TX_STATUS_HEADROOM 14 + +/** + * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames + * @sta: &struct ieee80211_sta pointer for the sleeping station + * @tid: the TID that has buffered frames + * @buffered: indicates whether or not frames are buffered for this TID + * + * If a driver buffers frames for a powersave station instead of passing + * them back to mac80211 for retransmission, the station may still need + * to be told that there are buffered frames via the TIM bit. + * + * This function informs mac80211 whether or not there are frames that are + * buffered in the driver for a given TID; mac80211 can then use this data + * to set the TIM bit (NOTE: This may call back into the driver's set_tim + * call! Beware of the locking!) + * + * If all frames are released to the station (due to PS-poll or uAPSD) + * then the driver needs to inform mac80211 that there no longer are + * frames buffered. However, when the station wakes up mac80211 assumes + * that all buffered frames will be transmitted and clears this data, + * drivers need to make sure they inform mac80211 about all buffered + * frames on the sleep transition (sta_notify() with %STA_NOTIFY_SLEEP). + * + * Note that technically mac80211 only needs to know this per AC, not per + * TID, but since driver buffering will inevitably happen per TID (since + * it is related to aggregation) it is easier to make mac80211 map the + * TID to the AC as required instead of keeping track in all drivers that + * use this API. + */ +void ieee80211_sta_set_buffered(struct ieee80211_sta *sta, + u8 tid, bool buffered); + +/** + * ieee80211_get_tx_rates - get the selected transmit rates for a packet + * + * Call this function in a driver with per-packet rate selection support + * to combine the rate info in the packet tx info with the most recent + * rate selection table for the station entry. + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @sta: the receiver station to which this packet is sent. + * @skb: the frame to be transmitted. + * @dest: buffer for extracted rate/retry information + * @max_rates: maximum number of rates to fetch + */ +void ieee80211_get_tx_rates(struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct ieee80211_tx_rate *dest, + int max_rates); /** * ieee80211_tx_status - transmit status callback @@ -1344,43 +3404,253 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, * transmitted. It is permissible to not call this function for * multicast frames but this can affect statistics. * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. Calls + * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe() + * may not be mixed for a single hardware. Must not run concurrently with + * ieee80211_rx() or ieee80211_rx_ni(). 
+ * + * @hw: the hardware the frame was transmitted by * @skb: the frame that was transmitted, owned by mac80211 after this call - * @status: status information for this frame; the status pointer need not - * be valid after this function returns and is not freed by mac80211, - * it is recommended that it points to a stack area */ void ieee80211_tx_status(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_tx_status *status); + struct sk_buff *skb); + +/** + * ieee80211_tx_status_ni - transmit status callback (in process context) + * + * Like ieee80211_tx_status() but can be called in process context. + * + * Calls to this function, ieee80211_tx_status() and + * ieee80211_tx_status_irqsafe() may not be mixed + * for a single hardware. + * + * @hw: the hardware the frame was transmitted by + * @skb: the frame that was transmitted, owned by mac80211 after this call + */ +static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + local_bh_disable(); + ieee80211_tx_status(hw, skb); + local_bh_enable(); +} + +/** + * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback + * + * Like ieee80211_tx_status() but can be called in IRQ context + * (internally defers to a tasklet.) + * + * Calls to this function, ieee80211_tx_status() and + * ieee80211_tx_status_ni() may not be mixed for a single hardware. + * + * @hw: the hardware the frame was transmitted by + * @skb: the frame that was transmitted, owned by mac80211 after this call + */ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_tx_status *status); + struct sk_buff *skb); /** - * ieee80211_beacon_get - beacon generation function + * ieee80211_report_low_ack - report non-responding station + * + * When operating in AP-mode, call this function to report a non-responding + * connected STA. + * + * @sta: the non-responding connected sta + * @num_packets: number of packets sent to @sta without a response + */ +void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets); + +#define IEEE80211_MAX_CSA_COUNTERS_NUM 2 + +/** + * struct ieee80211_mutable_offsets - mutable beacon offsets + * @tim_offset: position of TIM element + * @tim_length: size of TIM element + * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets + * to CSA counters. This array can contain zero values which + * should be ignored. + */ +struct ieee80211_mutable_offsets { + u16 tim_offset; + u16 tim_length; + + u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM]; +}; + +/** + * ieee80211_beacon_get_template - beacon template generation function * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. - * @control: will be filled with information needed to send this beacon. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @offs: &struct ieee80211_mutable_offsets pointer to struct that will + * receive the offsets that may be updated by the driver. + * + * If the driver implements beaconing modes, it must use this function to + * obtain the beacon template. + * + * This function should be used if the beacon frames are generated by the + * device, and then the driver must use the returned beacon as the template. + * The driver or the device is responsible for updating the DTIM and, when + * applicable, the CSA count. + * + * The driver is responsible for freeing the returned skb. + * + * Return: The beacon template. %NULL on error.
+ */ +struct sk_buff * +ieee80211_beacon_get_template(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_mutable_offsets *offs); + +/** + * ieee80211_beacon_get_tim - beacon generation function + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @tim_offset: pointer to variable that will receive the TIM IE offset. + * Set to 0 if invalid (in non-AP modes). + * @tim_length: pointer to variable that will receive the TIM IE length, + * (including the ID and length bytes!). + * Set to 0 if invalid (in non-AP modes). + * + * If the driver implements beaconing modes, it must use this function to + * obtain the beacon frame. * * If the beacon frames are generated by the host system (i.e., not in - * hardware/firmware), the low-level driver uses this function to receive - * the next beacon frame from the 802.11 code. The low-level is responsible - * for calling this function before beacon data is needed (e.g., based on - * hardware interrupt). Returned skb is used only once and low-level driver - * is responsible of freeing it. + * hardware/firmware), the driver uses this function to get each beacon + * frame from mac80211 -- it is responsible for calling this function exactly + * once before the beacon is needed (e.g. based on hardware interrupt). + * + * The driver is responsible for freeing the returned skb. + * + * Return: The beacon template. %NULL on error. */ -struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_tx_control *control); +struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 *tim_offset, u16 *tim_length); + +/** + * ieee80211_beacon_get - beacon generation function + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * See ieee80211_beacon_get_tim(). + * + * Return: See ieee80211_beacon_get_tim(). + */ +static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return ieee80211_beacon_get_tim(hw, vif, NULL, NULL); +} + +/** + * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * The csa counter should be updated after each beacon transmission. + * This function is called implicitly when + * ieee80211_beacon_get/ieee80211_beacon_get_tim are called, however if the + * beacon frames are generated by the device, the driver should call this + * function after each beacon transmission to sync mac80211's csa counters. + * + * Return: new csa counter value + */ +u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif); + +/** + * ieee80211_csa_finish - notify mac80211 about channel switch + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * After a channel switch announcement was scheduled and the counter in this + * announcement hits 1, this function must be called by the driver to + * notify mac80211 that the channel can be changed. + */ +void ieee80211_csa_finish(struct ieee80211_vif *vif); + +/** + * ieee80211_csa_is_complete - find out if counters reached 1 + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * This function returns whether the channel switch counters reached zero. 
+ */ +bool ieee80211_csa_is_complete(struct ieee80211_vif *vif); + + +/** + * ieee80211_proberesp_get - retrieve a Probe Response template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Creates a Probe Response template which can, for example, be uploaded to + * hardware. The destination address should be set by the caller. + * + * Can only be called in AP mode. + * + * Return: The Probe Response template. %NULL on error. + */ +struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** + * ieee80211_pspoll_get - retrieve a PS Poll template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Creates a PS Poll template which can, for example, be uploaded to + * hardware. The template must be updated after association so that the correct + * AID, BSSID and MAC address are used. + * + * Note: Caller (or hardware) is responsible for setting the + * &IEEE80211_FCTL_PM bit. + * + * Return: The PS Poll template. %NULL on error. + */ +struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** + * ieee80211_nullfunc_get - retrieve a nullfunc template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Creates a Nullfunc template which can, for example, be uploaded to + * hardware. The template must be updated after association so that the correct + * BSSID and address are used. + * + * Note: Caller (or hardware) is responsible for setting the + * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields. + * + * Return: The nullfunc template. %NULL on error. + */ +struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** + * ieee80211_probereq_get - retrieve a Probe Request template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @ssid: SSID buffer + * @ssid_len: length of SSID + * @tailroom: tailroom to reserve at end of SKB for IEs + * + * Creates a Probe Request template which can, for example, be uploaded to + * hardware. + * + * Return: The Probe Request template. %NULL on error. + */ +struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, + size_t tailroom); /** * ieee80211_rts_get - RTS frame generation function * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @frame: pointer to the frame that is going to be protected by the RTS. * @frame_len: the frame length (in octets). - * @frame_txctl: &struct ieee80211_tx_control of the frame. + * @frame_txctl: &struct ieee80211_tx_info of the frame. * @rts: The buffer where to store the RTS frame.
* * If the RTS frames are generated by the host system (i.e., not in @@ -1390,31 +3660,33 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, */ void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, - const struct ieee80211_tx_control *frame_txctl, + const struct ieee80211_tx_info *frame_txctl, struct ieee80211_rts *rts); /** * ieee80211_rts_duration - Get the duration field for an RTS frame * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @frame_len: the length of the frame that is going to be protected by the RTS. - * @frame_txctl: &struct ieee80211_tx_control of the frame. + * @frame_txctl: &struct ieee80211_tx_info of the frame. * * If the RTS is generated in firmware, but the host system must provide * the duration field, the low-level driver uses this function to receive * the duration field value in little-endian byteorder. + * + * Return: The duration. */ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, size_t frame_len, - const struct ieee80211_tx_control *frame_txctl); + const struct ieee80211_tx_info *frame_txctl); /** * ieee80211_ctstoself_get - CTS-to-self frame generation function * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @frame: pointer to the frame that is going to be protected by the CTS-to-self. * @frame_len: the frame length (in octets). - * @frame_txctl: &struct ieee80211_tx_control of the frame. + * @frame_txctl: &struct ieee80211_tx_info of the frame. * @cts: The buffer where to store the CTS-to-self frame. * * If the CTS-to-self frames are generated by the host system (i.e., not in @@ -1425,53 +3697,59 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, - const struct ieee80211_tx_control *frame_txctl, + const struct ieee80211_tx_info *frame_txctl, struct ieee80211_cts *cts); /** * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @frame_len: the length of the frame that is going to be protected by the CTS-to-self. - * @frame_txctl: &struct ieee80211_tx_control of the frame. + * @frame_txctl: &struct ieee80211_tx_info of the frame. * * If the CTS-to-self is generated in firmware, but the host system must provide * the duration field, the low-level driver uses this function to receive * the duration field value in little-endian byteorder. + * + * Return: The duration. */ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, size_t frame_len, - const struct ieee80211_tx_control *frame_txctl); + const struct ieee80211_tx_info *frame_txctl); /** * ieee80211_generic_frame_duration - Calculate the duration field for a frame * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
+ * @band: the band to calculate the frame duration on * @frame_len: the length of the frame. - * @rate: the rate (in 100kbps) at which the frame is going to be transmitted. + * @rate: the rate at which the frame is going to be transmitted. * * Calculate the duration field of some generic frame, given its * length and transmission rate (in 100kbps). + * + * Return: The duration. */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_band band, size_t frame_len, - int rate); + struct ieee80211_rate *rate); /** * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames * @hw: pointer as obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. - * @control: will be filled with information needed to send returned frame. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. * * Function for accessing buffered broadcast and multicast frames. If * hardware/firmware does not implement buffering of broadcast/multicast * frames when power saving is used, 802.11 code buffers them in the host * memory. The low-level driver uses this function to fetch next buffered - * frame. In most cases, this is used when generating beacon frame. This - * function returns a pointer to the next buffered skb or NULL if no more - * buffered frames are available. + * frame. In most cases, this is used when generating beacon frame. + * + * Return: A pointer to the next buffered skb or NULL if no more buffered + * frames are available. * * Note: buffered frames are returned only after DTIM beacon frame was * generated with ieee80211_beacon_get() and the low-level driver must thus @@ -1481,30 +3759,234 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, * use common code for all beacons. */ struct sk_buff * -ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_tx_control *control); +ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif); /** - * ieee80211_get_hdrlen_from_skb - get header length from data + * ieee80211_get_tkip_p1k_iv - get a TKIP phase 1 key for IV32 * - * Given an skb with a raw 802.11 header at the data pointer this function - * returns the 802.11 header length in bytes (not including encryption - * headers). If the data in the sk_buff is too short to contain a valid 802.11 - * header the function returns 0. + * This function returns the TKIP phase 1 key for the given IV32. * - * @skb: the frame + * @keyconf: the parameter passed with the set key + * @iv32: IV32 to get the P1K for + * @p1k: a buffer to which the key will be written, as 5 u16 values */ -int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); +void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf, + u32 iv32, u16 *p1k); /** - * ieee80211_get_hdrlen - get header length from frame control + * ieee80211_get_tkip_p1k - get a TKIP phase 1 key * - * This function returns the 802.11 header length in bytes (not including - * encryption headers.) + * This function returns the TKIP phase 1 key for the IV32 taken + * from the given packet. 
* - * @fc: the frame control field (in CPU endianness) + * @keyconf: the parameter passed with the set key + * @skb: the packet to take the IV32 value from that will be encrypted + * with this P1K + * @p1k: a buffer to which the key will be written, as 5 u16 values */ -int ieee80211_get_hdrlen(u16 fc); +static inline void ieee80211_get_tkip_p1k(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, u16 *p1k) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); + u32 iv32 = get_unaligned_le32(&data[4]); + + ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k); +} + +/** + * ieee80211_get_tkip_rx_p1k - get a TKIP phase 1 key for RX + * + * This function returns the TKIP phase 1 key for the given IV32 + * and transmitter address. + * + * @keyconf: the parameter passed with the set key + * @ta: TA that will be used with the key + * @iv32: IV32 to get the P1K for + * @p1k: a buffer to which the key will be written, as 5 u16 values + */ +void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, + const u8 *ta, u32 iv32, u16 *p1k); + +/** + * ieee80211_get_tkip_p2k - get a TKIP phase 2 key + * + * This function computes the TKIP RC4 key for the IV values + * in the packet. + * + * @keyconf: the parameter passed with the set key + * @skb: the packet to take the IV32/IV16 values from that will be + * encrypted with this key + * @p2k: a buffer to which the key will be written, 16 bytes + */ +void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, u8 *p2k); + +/** + * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys + * + * This function computes the two AES-CMAC sub-keys, based on the + * previously installed master key. + * + * @keyconf: the parameter passed with the set key + * @k1: a buffer to be filled with the 1st sub-key + * @k2: a buffer to be filled with the 2nd sub-key + */ +void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf, + u8 *k1, u8 *k2); + +/** + * struct ieee80211_key_seq - key sequence counter + * + * @tkip: TKIP data, containing IV32 and IV16 in host byte order + * @ccmp: PN data, most significant byte first (big endian, + * reverse order than in packet) + * @aes_cmac: PN data, most significant byte first (big endian, + * reverse order than in packet) + */ +struct ieee80211_key_seq { + union { + struct { + u32 iv32; + u16 iv16; + } tkip; + struct { + u8 pn[6]; + } ccmp; + struct { + u8 pn[6]; + } aes_cmac; + }; +}; + +/** + * ieee80211_get_key_tx_seq - get key TX sequence counter + * + * @keyconf: the parameter passed with the set key + * @seq: buffer to receive the sequence data + * + * This function allows a driver to retrieve the current TX IV/PN + * for the given key. It must not be called if IV generation is + * offloaded to the device. + * + * Note that this function may only be called when no TX processing + * can be done concurrently, for example when queues are stopped + * and the stop has been synchronized. + */ +void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, + struct ieee80211_key_seq *seq); + +/** + * ieee80211_get_key_rx_seq - get key RX sequence counter + * + * @keyconf: the parameter passed with the set key + * @tid: The TID, or -1 for the management frame value (CCMP only); + * the value on TID 0 is also used for non-QoS frames. For + * CMAC, only TID 0 is valid. 
+ * @seq: buffer to receive the sequence data + * + * This function allows a driver to retrieve the current RX IV/PNs + * for the given key. It must not be called if IV checking is done + * by the device and not by mac80211. + * + * Note that this function may only be called when no RX processing + * can be done concurrently. + */ +void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq); + +/** + * ieee80211_set_key_tx_seq - set key TX sequence counter + * + * @keyconf: the parameter passed with the set key + * @seq: new sequence data + * + * This function allows a driver to set the current TX IV/PNs for the + * given key. This is useful when resuming from WoWLAN sleep and the + * device may have transmitted frames using the PTK, e.g. replies to + * ARP requests. + * + * Note that this function may only be called when no TX processing + * can be done concurrently. + */ +void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, + struct ieee80211_key_seq *seq); + +/** + * ieee80211_set_key_rx_seq - set key RX sequence counter + * + * @keyconf: the parameter passed with the set key + * @tid: The TID, or -1 for the management frame value (CCMP only); + * the value on TID 0 is also used for non-QoS frames. For + * CMAC, only TID 0 is valid. + * @seq: new sequence data + * + * This function allows a driver to set the current RX IV/PNs for the + * given key. This is useful when resuming from WoWLAN sleep and GTK + * rekey may have been done while suspended. It should not be called + * if IV checking is done by the device and not by mac80211. + * + * Note that this function may only be called when no RX processing + * can be done concurrently. + */ +void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq); + +/** + * ieee80211_remove_key - remove the given key + * @keyconf: the parameter passed with the set key + * + * Remove the given key. If the key was uploaded to the hardware at the + * time this function is called, it is not deleted in the hardware but + * instead assumed to have been removed already. + * + * Note that due to locking considerations this function can (currently) + * only be called during key iteration (ieee80211_iter_keys().) + */ +void ieee80211_remove_key(struct ieee80211_key_conf *keyconf); + +/** + * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN + * @vif: the virtual interface to add the key on + * @keyconf: new key data + * + * When GTK rekeying was done while the system was suspended, (a) new + * key(s) will be available. These will be needed by mac80211 for proper + * RX processing, so this function allows setting them. + * + * The function returns the newly allocated key structure, which will + * have similar contents to the passed key configuration but point to + * mac80211-owned memory. In case of errors, the function returns an + * ERR_PTR(), use IS_ERR() etc. + * + * Note that this function assumes the key isn't added to hardware + * acceleration, so no TX will be done with the key. Since it's a GTK + * on managed (station) networks, this is true anyway. If the driver + * calls this function from the resume callback and subsequently uses + * the return code 1 to reconfigure the device, this key will be part + * of the reconfiguration. + * + * Note that the driver should also call ieee80211_set_key_rx_seq() + * for the new key for each TID to set up sequence counters properly. 
+ * + * IMPORTANT: If this replaces a key that is present in the hardware, + * then it will attempt to remove it during this call. In many cases + * this isn't what you want, so call ieee80211_remove_key() first for + * the key that's being replaced. + */ +struct ieee80211_key_conf * +ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf); + +/** + * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying + * @vif: virtual interface the rekeying was done on + * @bssid: The BSSID of the AP, for checking association + * @replay_ctr: the new replay counter after GTK rekeying + * @gfp: allocation flags + */ +void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, + const u8 *replay_ctr, gfp_t gfp); /** * ieee80211_wake_queue - wake specific queue @@ -1525,12 +4007,16 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue); void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue); /** - * ieee80211_start_queues - start all queues - * @hw: pointer to as obtained from ieee80211_alloc_hw(). + * ieee80211_queue_stopped - test status of the queue + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * @queue: queue number (counted from zero). + * + * Drivers should use this function instead of netif_stop_queue. * - * Drivers should use this function instead of netif_start_queue. + * Return: %true if the queue is stopped. %false otherwise. */ -void ieee80211_start_queues(struct ieee80211_hw *hw); + +int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue); /** * ieee80211_stop_queues - stop all queues @@ -1553,25 +4039,780 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw); * * When hardware scan offload is used (i.e. the hw_scan() callback is * assigned) this function needs to be called by the driver to notify - * mac80211 that the scan finished. + * mac80211 that the scan finished. This function can be called from + * any context, including hardirq context. * * @hw: the hardware that finished the scan + * @aborted: set to true if scan was aborted + */ +void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted); + +/** + * ieee80211_sched_scan_results - got results from scheduled scan + * + * When a scheduled scan is running, this function needs to be called by the + * driver whenever there are new scan results available. + * + * @hw: the hardware that is performing scheduled scans + */ +void ieee80211_sched_scan_results(struct ieee80211_hw *hw); + +/** + * ieee80211_sched_scan_stopped - inform that the scheduled scan has stopped + * + * When a scheduled scan is running, this function can be called by + * the driver if it needs to stop the scan to perform another task. + * Usual scenarios are drivers that cannot continue the scheduled scan + * while associating, for instance. + * + * @hw: the hardware that is performing scheduled scans */ -void ieee80211_scan_completed(struct ieee80211_hw *hw); +void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw); + +/** + * enum ieee80211_interface_iteration_flags - interface iteration flags + * @IEEE80211_IFACE_ITER_NORMAL: Iterate over all interfaces that have + * been added to the driver; However, note that during hardware + * reconfiguration (after restart_hw) it will iterate over a new + * interface and over all the existing interfaces even if they + * haven't been re-added to the driver yet. + * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all + * interfaces, even if they haven't been re-added to the driver yet. 
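+ *
+ * For example, a driver's resume path could reprogram per-interface state
+ * roughly as sketched below; my_restore_vif(), my_program_mac() and
+ * drv_priv are hypothetical driver code:
+ *
+ *	static void my_restore_vif(void *data, u8 *mac,
+ *				   struct ieee80211_vif *vif)
+ *	{
+ *		my_program_mac(data, mac);
+ *	}
+ *
+ *	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ *					    my_restore_vif, drv_priv);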
+ */ +enum ieee80211_interface_iteration_flags { + IEEE80211_IFACE_ITER_NORMAL = 0, + IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0), +}; /** * ieee80211_iterate_active_interfaces - iterate active interfaces * * This function iterates over the interfaces associated with a given * hardware that are currently active and calls the callback for them. + * This function allows the iterator function to sleep, when the iterator + * function is atomic @ieee80211_iterate_active_interfaces_atomic can + * be used. + * Does not iterate over a new interface during add_interface(). * * @hw: the hardware struct of which the interfaces should be iterated over - * @iterator: the iterator function to call, cannot sleep + * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags + * @iterator: the iterator function to call * @data: first argument of the iterator function */ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, + u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data); +/** + * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces + * + * This function iterates over the interfaces associated with a given + * hardware that are currently active and calls the callback for them. + * This function requires the iterator callback function to be atomic, + * if that is not desired, use @ieee80211_iterate_active_interfaces instead. + * Does not iterate over a new interface during add_interface(). + * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, + u32 iter_flags, + void (*iterator)(void *data, + u8 *mac, + struct ieee80211_vif *vif), + void *data); + +/** + * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces + * + * This function iterates over the interfaces associated with a given + * hardware that are currently active and calls the callback for them. + * This version can only be used while holding the RTNL. + * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw, + u32 iter_flags, + void (*iterator)(void *data, + u8 *mac, + struct ieee80211_vif *vif), + void *data); + +/** + * ieee80211_queue_work - add work onto the mac80211 workqueue + * + * Drivers and mac80211 use this to add work onto the mac80211 workqueue. + * This helper ensures drivers are not queueing work when they should not be. + * + * @hw: the hardware struct for the interface we are adding work for + * @work: the work we want to add onto the mac80211 workqueue + */ +void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work); + +/** + * ieee80211_queue_delayed_work - add work onto the mac80211 workqueue + * + * Drivers and mac80211 use this to queue delayed work onto the mac80211 + * workqueue. 
+ * + * @hw: the hardware struct for the interface we are adding work for + * @dwork: delayable work to queue onto the mac80211 workqueue + * @delay: number of jiffies to wait before queueing + */ +void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, + struct delayed_work *dwork, + unsigned long delay); + +/** + * ieee80211_start_tx_ba_session - Start a tx Block Ack session. + * @sta: the station for which to start a BA session + * @tid: the TID to BA on. + * @timeout: session timeout value (in TUs) + * + * Return: success if addBA request was sent, failure otherwise + * + * Although mac80211/low level driver/user space application can estimate + * the need to start aggregation on a certain RA/TID, the session level + * will be managed by the mac80211. + */ +int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid, + u16 timeout); + +/** + * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate. + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @ra: receiver address of the BA session recipient. + * @tid: the TID to BA on. + * + * This function must be called by low level driver once it has + * finished with preparations for the BA session. It can be called + * from any context. + */ +void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, + u16 tid); + +/** + * ieee80211_stop_tx_ba_session - Stop a Block Ack session. + * @sta: the station whose BA session to stop + * @tid: the TID to stop BA. + * + * Return: negative error if the TID is invalid, or no aggregation active + * + * Although mac80211/low level driver/user space application can estimate + * the need to stop aggregation on a certain RA/TID, the session level + * will be managed by the mac80211. + */ +int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid); + +/** + * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate. + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @ra: receiver address of the BA session recipient. + * @tid: the desired TID to BA on. + * + * This function must be called by low level driver once it has + * finished with preparations for the BA session tear down. It + * can be called from any context. + */ +void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, + u16 tid); + +/** + * ieee80211_find_sta - find a station + * + * @vif: virtual interface to look for station on + * @addr: station's address + * + * Return: The station, if found. %NULL otherwise. + * + * Note: This function must be called under RCU lock and the + * resulting pointer is only valid under RCU lock as well. + */ +struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, + const u8 *addr); + +/** + * ieee80211_find_sta_by_ifaddr - find a station on hardware + * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @addr: remote station's address + * @localaddr: local address (vif->sdata->vif.addr). Use NULL for 'any'. + * + * Return: The station, if found. %NULL otherwise. + * + * Note: This function must be called under RCU lock and the + * resulting pointer is only valid under RCU lock as well. + * + * NOTE: You may pass NULL for localaddr, but then you will just get + * the first STA that matches the remote address 'addr'. + * We can have multiple STA associated with multiple + * logical stations (e.g. consider a station connecting to another + * BSSID on the same AP hardware without disconnecting first). 
+ * In this case, the result of this method with localaddr NULL + * is not reliable. + * + * DO NOT USE THIS FUNCTION with localaddr NULL if at all possible. + */ +struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, + const u8 *addr, + const u8 *localaddr); + +/** + * ieee80211_sta_block_awake - block station from waking up + * @hw: the hardware + * @pubsta: the station + * @block: whether to block or unblock + * + * Some devices require that all frames that are on the queues + * for a specific station that went to sleep are flushed before + * a poll response or frames after the station woke up can be + * delivered to it. Note that such frames must be rejected + * by the driver as filtered, with the appropriate status flag. + * + * This function allows implementing this mode in a race-free + * manner. + * + * To do this, a driver must keep track of the number of frames + * still enqueued for a specific station. If this number is not + * zero when the station goes to sleep, the driver must call + * this function to force mac80211 to consider the station to + * be asleep regardless of the station's actual state. Once the + * number of outstanding frames reaches zero, the driver must + * call this function again to unblock the station. That will + * cause mac80211 to be able to send ps-poll responses, and if + * the station queried in the meantime, then frames will also + * be sent out as a result of this. Additionally, the driver + * will be notified that the station woke up some time after + * it is unblocked, regardless of whether the station actually + * woke up while blocked or not. + */ +void ieee80211_sta_block_awake(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, bool block); + +/** + * ieee80211_sta_eosp - notify mac80211 about end of SP + * @pubsta: the station + * + * When a device transmits frames in a way that it can't tell + * mac80211 in the TX status about the EOSP, it must clear the + * %IEEE80211_TX_STATUS_EOSP bit and call this function instead. + * This applies for PS-Poll as well as uAPSD. + * + * Note that just like with _tx_status() and _rx(), drivers must + * not mix calls to irqsafe/non-irqsafe versions, this function + * must not be mixed with those either. Use all irqsafe, or + * all non-irqsafe, don't mix! + * + * NB: the _irqsafe version of this function doesn't exist, no + * driver needs it right now. Don't call this function if + * you'd need the _irqsafe version, look at the git history + * and restore the _irqsafe version! + */ +void ieee80211_sta_eosp(struct ieee80211_sta *pubsta); + +/** + * ieee80211_iter_keys - iterate keys programmed into the device + * @hw: pointer obtained from ieee80211_alloc_hw() + * @vif: virtual interface to iterate, may be %NULL for all + * @iter: iterator function that will be called for each key + * @iter_data: custom data to pass to the iterator function + * + * This function can be used to iterate all the keys known to + * mac80211, even those that weren't previously programmed into + * the device. This is intended for use in WoWLAN if the device + * needs reprogramming of the keys during suspend. Note that due + * to locking reasons, it is also only safe to call this at a few + * spots since it must hold the RTNL and be able to sleep. + * + * The order in which the keys are iterated matches the order + * in which they were originally installed and handed to the + * set_key callback.
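+ *
+ * A minimal sketch of the intended WoWLAN use, called from the driver's
+ * suspend path (my_upload_key() and drv_priv are hypothetical driver
+ * code):
+ *
+ *	static void my_key_iter(struct ieee80211_hw *hw,
+ *				struct ieee80211_vif *vif,
+ *				struct ieee80211_sta *sta,
+ *				struct ieee80211_key_conf *key, void *data)
+ *	{
+ *		my_upload_key(data, vif, sta, key);
+ *	}
+ *
+ *	ieee80211_iter_keys(hw, vif, my_key_iter, drv_priv);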
+ */ +void ieee80211_iter_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *iter_data); + +/** + * ieee80211_iter_chan_contexts_atomic - iterate channel contexts + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @iter: iterator function + * @iter_data: data passed to iterator function + * + * Iterate all active channel contexts. This function is atomic and + * doesn't acquire any locks internally that might be held in other + * places while calling into the driver. + * + * The iterator will not find a context that's being added (during + * the driver callback to add it) but will find it while it's being + * removed. + * + * Note that during hardware restart, all contexts that existed + * before the restart are considered already present so will be + * found while iterating, whether they've been re-added already + * or not. + */ +void ieee80211_iter_chan_contexts_atomic( + struct ieee80211_hw *hw, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *chanctx_conf, + void *data), + void *iter_data); + +/** + * ieee80211_ap_probereq_get - retrieve a Probe Request template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Creates a Probe Request template which can, for example, be uploaded to + * hardware. The template is filled with bssid, ssid and supported rate + * information. This function must only be called from within the + * .bss_info_changed callback function and only in managed mode. The function + * is only useful when the interface is associated, otherwise it will return + * %NULL. + * + * Return: The Probe Request template. %NULL on error. + */ +struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** + * ieee80211_beacon_loss - inform that hardware does not receive beacons + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER and + * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the + * hardware is not receiving beacons with this function. + */ +void ieee80211_beacon_loss(struct ieee80211_vif *vif); + +/** + * ieee80211_connection_loss - inform that hardware has lost connection to the AP + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * When beacon filtering is enabled with %IEEE80211_VIF_BEACON_FILTER, and + * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver + * needs to inform if the connection to the AP has been lost. + * The function may also be called if the connection needs to be terminated + * for some other reason, even if %IEEE80211_HW_CONNECTION_MONITOR isn't set. + * + * This function will cause immediate change to disassociated state, + * without connection recovery attempts. + */ +void ieee80211_connection_loss(struct ieee80211_vif *vif); + +/** + * ieee80211_resume_disconnect - disconnect from AP after resume + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Instructs mac80211 to disconnect from the AP after resume.
+ * Drivers can use this after WoWLAN if they know that the + * connection cannot be kept up, for example because keys were + * used while the device was asleep but the replay counters or + * similar cannot be retrieved from the device during resume. + * + * Note that due to implementation issues, if the driver uses + * the reconfiguration functionality during resume the interface + * will still be added as associated first during resume and then + * disconnect normally later. + * + * This function can only be called from the resume callback and + * the driver must not be holding any of its own locks while it + * calls this function, or at least not any locks it needs in the + * key configuration paths (if it supports HW crypto). + */ +void ieee80211_resume_disconnect(struct ieee80211_vif *vif); + +/** + * ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring + * rssi threshold triggered + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @rssi_event: the RSSI trigger event type + * @gfp: context flags + * + * When the %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set, and a connection quality + * monitoring is configured with an rssi threshold, the driver will inform + * whenever the rssi level reaches the threshold. + */ +void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, + enum nl80211_cqm_rssi_threshold_event rssi_event, + gfp_t gfp); + +/** + * ieee80211_radar_detected - inform that a radar was detected + * + * @hw: pointer as obtained from ieee80211_alloc_hw() + */ +void ieee80211_radar_detected(struct ieee80211_hw *hw); + +/** + * ieee80211_chswitch_done - Complete channel switch process + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @success: make the channel switch successful or not + * + * Complete the channel switch post-process: set the new operational channel + * and wake up the suspended queues. + */ +void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); + +/** + * ieee80211_request_smps - request SM PS transition + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @smps_mode: new SM PS mode + * + * This allows the driver to request an SM PS transition in managed + * mode. This is useful when the driver has more information than + * the stack about possible interference, for example by bluetooth. + */ +void ieee80211_request_smps(struct ieee80211_vif *vif, + enum ieee80211_smps_mode smps_mode); + +/** + * ieee80211_ready_on_channel - notification of remain-on-channel start + * @hw: pointer as obtained from ieee80211_alloc_hw() + */ +void ieee80211_ready_on_channel(struct ieee80211_hw *hw); + +/** + * ieee80211_remain_on_channel_expired - remain_on_channel duration expired + * @hw: pointer as obtained from ieee80211_alloc_hw() + */ +void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw); + +/** + * ieee80211_stop_rx_ba_session - callback to stop existing BA sessions + * + * in order not to harm the system performance and user experience, the device + * may request not to allow any rx ba session and tear down existing rx ba + * sessions based on system constraints such as periodic BT activity that needs + * to limit wlan activity (eg.sco or a2dp)." + * in such cases, the intention is to limit the duration of the rx ppdu and + * therefore prevent the peer device to use a-mpdu aggregation. + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
+ * @ba_rx_bitmap: Bit map of open rx ba per tid + * @addr: & to bssid mac address + */ +void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, + const u8 *addr); + +/** + * ieee80211_send_bar - send a BlockAckReq frame + * + * can be used to flush pending frames from the peer's aggregation reorder + * buffer. + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @ra: the peer's destination address + * @tid: the TID of the aggregation session + * @ssn: the new starting sequence number for the receiver + */ +void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn); + +/* Rate control API */ + +/** + * struct ieee80211_tx_rate_control - rate control information for/from RC algo + * + * @hw: The hardware the algorithm is invoked for. + * @sband: The band this frame is being transmitted on. + * @bss_conf: the current BSS configuration + * @skb: the skb that will be transmitted, the control information in it needs + * to be filled in + * @reported_rate: The rate control algorithm can fill this in to indicate + * which rate should be reported to userspace as the current rate and + * used for rate calculations in the mesh network. + * @rts: whether RTS will be used for this frame because it is longer than the + * RTS threshold + * @short_preamble: whether mac80211 will request short-preamble transmission + * if the selected rate supports it + * @max_rate_idx: user-requested maximum (legacy) rate + * (deprecated; this will be removed once drivers get updated to use + * rate_idx_mask) + * @rate_idx_mask: user-requested (legacy) rate mask + * @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use) + * @bss: whether this frame is sent out in AP or IBSS mode + */ +struct ieee80211_tx_rate_control { + struct ieee80211_hw *hw; + struct ieee80211_supported_band *sband; + struct ieee80211_bss_conf *bss_conf; + struct sk_buff *skb; + struct ieee80211_tx_rate reported_rate; + bool rts, short_preamble; + u8 max_rate_idx; + u32 rate_idx_mask; + u8 *rate_idx_mcs_mask; + bool bss; +}; + +struct rate_control_ops { + const char *name; + void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); + void (*free)(void *priv); + + void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); + void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta); + void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed); + void (*free_sta)(void *priv, struct ieee80211_sta *sta, + void *priv_sta); + + void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb); + void (*get_rate)(void *priv, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc); + + void (*add_sta_debugfs)(void *priv, void *priv_sta, + struct dentry *dir); + void (*remove_sta_debugfs)(void *priv, void *priv_sta); + + u32 (*get_expected_throughput)(void *priv_sta); +}; + +static inline int rate_supported(struct ieee80211_sta *sta, + enum ieee80211_band band, + int index) +{ + return (sta == NULL || sta->supp_rates[band] & BIT(index)); +} + +/** + * rate_control_send_low - helper for drivers for management/no-ack frames + * + * Rate control algorithms that agree to use the lowest rate to + * send management frames and NO_ACK data with the respective hw + * 
retries should use this in the beginning of their mac80211 get_rate + * callback. If true is returned, the rate control can simply return. + * If false is returned, we guarantee that sta and priv_sta are + * not null. + * + * Rate control algorithms wishing to do more intelligent selection of + * rate for multicast/broadcast frames may choose to not use this. + * + * @sta: &struct ieee80211_sta pointer to the target destination. Note + * that this may be null. + * @priv_sta: private rate control structure. This may be null. + * @txrc: rate control information we should populate for mac80211. + */ +bool rate_control_send_low(struct ieee80211_sta *sta, + void *priv_sta, + struct ieee80211_tx_rate_control *txrc); + + +static inline s8 +rate_lowest_index(struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta) +{ + int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (rate_supported(sta, sband->band, i)) + return i; + + /* warn when we cannot find a rate. */ + WARN_ON_ONCE(1); + + /* and return 0 (the lowest index) */ + return 0; +} + +static inline +bool rate_usable_index_exists(struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta) +{ + unsigned int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (rate_supported(sta, sband->band, i)) + return true; + return false; +} + +/** + * rate_control_set_rates - pass the sta rate selection to mac80211/driver + * + * When not doing a rate control probe to test rates, rate control should pass + * its rate selection to mac80211. If the driver supports receiving a station + * rate table, it will use it to ensure that frames are always sent based on + * the most recent rate control module decision. + * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @pubsta: &struct ieee80211_sta pointer to the target destination. + * @rates: new tx rate set to be used for this station.
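+ *
+ * A minimal sketch from a rate control algorithm's perspective, assuming
+ * the &struct ieee80211_sta_rates rate table defined elsewhere in this
+ * header; chosen_idx and chosen_count stand for the algorithm's current
+ * selection, and error handling is omitted:
+ *
+ *	struct ieee80211_sta_rates *rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
+ *
+ *	rates->rate[0].idx = chosen_idx;
+ *	rates->rate[0].count = chosen_count;
+ *	rates->rate[1].idx = -1;
+ *	rate_control_set_rates(hw, sta, rates);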
+ */ +int rate_control_set_rates(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_sta_rates *rates); + +int ieee80211_rate_control_register(const struct rate_control_ops *ops); +void ieee80211_rate_control_unregister(const struct rate_control_ops *ops); + +static inline bool +conf_is_ht20(struct ieee80211_conf *conf) +{ + return conf->chandef.width == NL80211_CHAN_WIDTH_20; +} + +static inline bool +conf_is_ht40_minus(struct ieee80211_conf *conf) +{ + return conf->chandef.width == NL80211_CHAN_WIDTH_40 && + conf->chandef.center_freq1 < conf->chandef.chan->center_freq; +} + +static inline bool +conf_is_ht40_plus(struct ieee80211_conf *conf) +{ + return conf->chandef.width == NL80211_CHAN_WIDTH_40 && + conf->chandef.center_freq1 > conf->chandef.chan->center_freq; +} + +static inline bool +conf_is_ht40(struct ieee80211_conf *conf) +{ + return conf->chandef.width == NL80211_CHAN_WIDTH_40; +} + +static inline bool +conf_is_ht(struct ieee80211_conf *conf) +{ + return (conf->chandef.width != NL80211_CHAN_WIDTH_5) && + (conf->chandef.width != NL80211_CHAN_WIDTH_10) && + (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT); +} + +static inline enum nl80211_iftype +ieee80211_iftype_p2p(enum nl80211_iftype type, bool p2p) +{ + if (p2p) { + switch (type) { + case NL80211_IFTYPE_STATION: + return NL80211_IFTYPE_P2P_CLIENT; + case NL80211_IFTYPE_AP: + return NL80211_IFTYPE_P2P_GO; + default: + break; + } + } + return type; +} + +static inline enum nl80211_iftype +ieee80211_vif_type_p2p(struct ieee80211_vif *vif) +{ + return ieee80211_iftype_p2p(vif->type, vif->p2p); +} + +void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, + int rssi_min_thold, + int rssi_max_thold); + +void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif); + +/** + * ieee80211_ave_rssi - report the average RSSI for the specified interface + * + * @vif: the specified virtual interface + * + * Note: This function assumes that the given vif is valid. + * + * Return: The average RSSI value for the requested interface, or 0 if not + * applicable. + */ +int ieee80211_ave_rssi(struct ieee80211_vif *vif); + +/** + * ieee80211_report_wowlan_wakeup - report WoWLAN wakeup + * @vif: virtual interface + * @wakeup: wakeup reason(s) + * @gfp: allocation flags + * + * See cfg80211_report_wowlan_wakeup(). 
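+ *
+ * A minimal sketch, assuming the device reported a magic-packet wakeup
+ * (how the wakeup cause is decoded is of course device specific):
+ *
+ *	struct cfg80211_wowlan_wakeup wakeup = {};
+ *
+ *	wakeup.magic_pkt = true;
+ *	wakeup.pattern_idx = -1;
+ *	ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);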
+ */ +void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif, + struct cfg80211_wowlan_wakeup *wakeup, + gfp_t gfp); + +/** + * ieee80211_tx_prepare_skb - prepare an 802.11 skb for transmission + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @vif: virtual interface + * @skb: frame to be sent from within the driver + * @band: the band to transmit on + * @sta: optional pointer to get the station to send the frame to + * + * Note: must be called under RCU lock + */ +bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct sk_buff *skb, + int band, struct ieee80211_sta **sta); + +/** + * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state + * + * @next_tsf: TSF timestamp of the next absent state change + * @has_next_tsf: next absent state change event pending + * + * @absent: descriptor bitmask, set if GO is currently absent + * + * private: + * + * @count: count fields from the NoA descriptors + * @desc: adjusted data from the NoA + */ +struct ieee80211_noa_data { + u32 next_tsf; + bool has_next_tsf; + + u8 absent; + + u8 count[IEEE80211_P2P_NOA_DESC_MAX]; + struct { + u32 start; + u32 duration; + u32 interval; + } desc[IEEE80211_P2P_NOA_DESC_MAX]; +}; + +/** + * ieee80211_parse_p2p_noa - initialize NoA tracking data from P2P IE + * + * @attr: P2P NoA IE + * @data: NoA tracking data + * @tsf: current TSF timestamp + * + * Return: number of successfully parsed descriptors + */ +int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr, + struct ieee80211_noa_data *data, u32 tsf); + +/** + * ieee80211_update_p2p_noa - get next pending P2P GO absent state change + * + * @data: NoA tracking data + * @tsf: current TSF timestamp + */ +void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf); + #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h new file mode 100644 index 00000000000..a591053cae6 --- /dev/null +++ b/include/net/mac802154.h @@ -0,0 +1,179 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef NET_MAC802154_H +#define NET_MAC802154_H + +#include <net/af_ieee802154.h> +#include <linux/skbuff.h> + +/* General MAC frame format: + * 2 bytes: Frame Control + * 1 byte: Sequence Number + * 20 bytes: Addressing fields + * 14 bytes: Auxiliary Security Header + */ +#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14) + +/* The following flags are used to indicate changed address settings from + * the stack to the hardware. 
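+ *
+ * For instance, a driver's set_hw_addr_filt callback would typically only
+ * touch the fields flagged in the changed mask, roughly like this
+ * (my_write_short_addr() and my_write_panid() are hypothetical register
+ * accessors):
+ *
+ *	static int my_set_hw_addr_filt(struct ieee802154_dev *dev,
+ *				       struct ieee802154_hw_addr_filt *filt,
+ *				       unsigned long changed)
+ *	{
+ *		if (changed & IEEE802515_AFILT_SADDR_CHANGED)
+ *			my_write_short_addr(dev->priv, filt->short_addr);
+ *		if (changed & IEEE802515_AFILT_PANID_CHANGED)
+ *			my_write_panid(dev->priv, filt->pan_id);
+ *		return 0;
+ *	}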
+ */ + +/* indicates that the Short Address changed */ +#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001 +/* indicates that the IEEE Address changed */ +#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002 +/* indicates that the PAN ID changed */ +#define IEEE802515_AFILT_PANID_CHANGED 0x00000004 +/* indicates that PAN Coordinator status changed */ +#define IEEE802515_AFILT_PANC_CHANGED 0x00000008 + +struct ieee802154_hw_addr_filt { + __le16 pan_id; /* Each independent PAN selects a unique + * identifier. This PAN id allows communication + * between devices within a network using short + * addresses and enables transmissions between + * devices across independent networks. + */ + __le16 short_addr; + __le64 ieee_addr; + u8 pan_coord; +}; + +struct ieee802154_dev { + /* filled by the driver */ + int extra_tx_headroom; + u32 flags; + struct device *parent; + + /* filled by mac802154 core */ + struct ieee802154_hw_addr_filt hw_filt; + void *priv; + struct wpan_phy *phy; +}; + +/* Checksum is in hardware and is omitted from a packet + * + * The following flags are used to indicate hardware capabilities to + * the stack. Generally, flags here should have their meaning + * done in a way that the simplest hardware doesn't need setting + * any particular flags. There are some exceptions to this rule, + * however, so you are advised to review these flags carefully. + */ + +/* Indicates that receiver omits FCS and transmitter will add FCS on its own. */ +#define IEEE802154_HW_OMIT_CKSUM 0x00000001 +/* Indicates that receiver will autorespond with ACK frames. */ +#define IEEE802154_HW_AACK 0x00000002 + +/* struct ieee802154_ops - callbacks from mac802154 to the driver + * + * This structure contains various callbacks that the driver may + * handle or, in some cases, must handle, for example to transmit + * a frame. + * + * start: Handler that 802.15.4 module calls for device initialization. + * This function is called before the first interface is attached. + * + * stop: Handler that 802.15.4 module calls for device cleanup. + * This function is called after the last interface is removed. + * + * xmit: Handler that 802.15.4 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.15.4 header. + * The low-level driver should send the frame based on available + * configuration. + * This function should return zero or negative errno. Called with + * pib_lock held. + * + * ed: Handler that 802.15.4 module calls for Energy Detection. + * This function should place the value for detected energy + * (usually device-dependent) in the level pointer and return + * either zero or negative errno. Called with pib_lock held. + * + * set_channel: + * Set the device for listening on the specified channel. + * Returns either zero, or negative errno. Called with pib_lock held. + * + * set_hw_addr_filt: + * Set the device for listening on the specified address. + * Returns either zero, or negative errno. + * + * set_txpower: + * Set radio transmit power in dB. Called with pib_lock held. + * Returns either zero, or negative errno. + * + * set_lbt + * Enables or disables listen before talk on the device. Called with + * pib_lock held. + * Returns either zero, or negative errno. + * + * set_cca_mode + * Sets the CCA mode used by the device. Called with pib_lock held. + * Returns either zero, or negative errno. + * + * set_cca_ed_level + * Sets the CCA energy detection threshold in dBm.
Called with pib_lock + * held. + * Returns either zero, or negative errno. + * + * set_csma_params + * Sets the CSMA parameter set for the PHY. Called with pib_lock held. + * Returns either zero, or negative errno. + * + * set_frame_retries + * Sets the retransmission attempt limit. Called with pib_lock held. + * Returns either zero, or negative errno. + */ +struct ieee802154_ops { + struct module *owner; + int (*start)(struct ieee802154_dev *dev); + void (*stop)(struct ieee802154_dev *dev); + int (*xmit)(struct ieee802154_dev *dev, + struct sk_buff *skb); + int (*ed)(struct ieee802154_dev *dev, u8 *level); + int (*set_channel)(struct ieee802154_dev *dev, + int page, + int channel); + int (*set_hw_addr_filt)(struct ieee802154_dev *dev, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed); + int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr); + int (*set_txpower)(struct ieee802154_dev *dev, int db); + int (*set_lbt)(struct ieee802154_dev *dev, bool on); + int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode); + int (*set_cca_ed_level)(struct ieee802154_dev *dev, + s32 level); + int (*set_csma_params)(struct ieee802154_dev *dev, + u8 min_be, u8 max_be, u8 retries); + int (*set_frame_retries)(struct ieee802154_dev *dev, + s8 retries); +}; + +/* Basic interface to register ieee802154 device */ +struct ieee802154_dev * +ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops); +void ieee802154_free_device(struct ieee802154_dev *dev); +int ieee802154_register_device(struct ieee802154_dev *dev); +void ieee802154_unregister_device(struct ieee802154_dev *dev); + +void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, + u8 lqi); + +#endif /* NET_MAC802154_H */ diff --git a/include/net/mip6.h b/include/net/mip6.h index 63272610a24..0386b618908 100644 --- a/include/net/mip6.h +++ b/include/net/mip6.h @@ -13,8 +13,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ /* * Authors: @@ -28,9 +27,6 @@ #include <linux/skbuff.h> #include <net/sock.h> -#define MIP6_OPT_PAD_1 0 -#define MIP6_OPT_PAD_N 1 - /* * Mobility Header */ @@ -42,7 +38,7 @@ struct ip6_mh { __u16 ip6mh_cksum; /* Followed by type specific messages */ __u8 data[0]; -} __attribute__ ((__packed__)); +} __packed; #define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */ #define IP6_MH_TYPE_HOTI 1 /* HOTI Message */ diff --git a/include/net/mld.h b/include/net/mld.h new file mode 100644 index 00000000000..faa1d161bf2 --- /dev/null +++ b/include/net/mld.h @@ -0,0 +1,110 @@ +#ifndef LINUX_MLD_H +#define LINUX_MLD_H + +#include <linux/in6.h> +#include <linux/icmpv6.h> + +/* MLDv1 Query/Report/Done */ +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +#define mld_type mld_hdr.icmp6_type +#define mld_code mld_hdr.icmp6_code +#define mld_cksum mld_hdr.icmp6_cksum +#define mld_maxdelay mld_hdr.icmp6_maxdelay +#define mld_reserved mld_hdr.icmp6_dataun.un_data16[1] + +/* Multicast Listener Discovery version 2 headers */ +/* MLDv2 Report */ +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +#define mld2r_type mld2r_hdr.icmp6_type +#define mld2r_resv1 mld2r_hdr.icmp6_code +#define mld2r_cksum mld2r_hdr.icmp6_cksum +#define mld2r_resv2 mld2r_hdr.icmp6_dataun.un_data16[0] +#define mld2r_ngrec mld2r_hdr.icmp6_dataun.un_data16[1] + +/* MLDv2 Query */ +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 mld2q_qrv:3, + mld2q_suppress:1, + mld2q_resv2:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 mld2q_resv2:4, + mld2q_suppress:1, + mld2q_qrv:3; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +#define mld2q_type mld2q_hdr.icmp6_type +#define mld2q_code mld2q_hdr.icmp6_code +#define mld2q_cksum mld2q_hdr.icmp6_cksum +#define mld2q_mrc mld2q_hdr.icmp6_maxdelay +#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1] + +/* RFC3810, 5.1.3. Maximum Response Code: + * + * If Maximum Response Code >= 32768, Maximum Response Code represents a + * floating-point value as follows: + * + * 0 1 2 3 4 5 6 7 8 9 A B C D E F + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |1| exp | mant | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +#define MLDV2_MRC_EXP(value) (((value) >> 12) & 0x0007) +#define MLDV2_MRC_MAN(value) ((value) & 0x0fff) + +/* RFC3810, 5.1.9. QQIC (Querier's Query Interval Code): + * + * If QQIC >= 128, QQIC represents a floating-point value as follows: + * + * 0 1 2 3 4 5 6 7 + * +-+-+-+-+-+-+-+-+ + * |1| exp | mant | + * +-+-+-+-+-+-+-+-+ + */ +#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) +#define MLDV2_QQIC_MAN(value) ((value) & 0x0f) + +static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) +{ + /* RFC3810, 5.1.3. 
Maximum Response Code */ + unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc); + + if (mc_mrc < 32768) { + ret = mc_mrc; + } else { + unsigned long mc_man, mc_exp; + + mc_exp = MLDV2_MRC_EXP(mc_mrc); + mc_man = MLDV2_MRC_MAN(mc_mrc); + + ret = (mc_man | 0x1000) << (mc_exp + 3); + } + + return ret; +} + +#endif diff --git a/include/net/mrp.h b/include/net/mrp.h new file mode 100644 index 00000000000..31912c3be77 --- /dev/null +++ b/include/net/mrp.h @@ -0,0 +1,142 @@ +#ifndef _NET_MRP_H +#define _NET_MRP_H + +#define MRP_END_MARK 0x0 + +struct mrp_pdu_hdr { + u8 version; +}; + +struct mrp_msg_hdr { + u8 attrtype; + u8 attrlen; +}; + +struct mrp_vecattr_hdr { + __be16 lenflags; + unsigned char firstattrvalue[]; +#define MRP_VECATTR_HDR_LEN_MASK cpu_to_be16(0x1FFF) +#define MRP_VECATTR_HDR_FLAG_LA cpu_to_be16(0x2000) +}; + +enum mrp_vecattr_event { + MRP_VECATTR_EVENT_NEW, + MRP_VECATTR_EVENT_JOIN_IN, + MRP_VECATTR_EVENT_IN, + MRP_VECATTR_EVENT_JOIN_MT, + MRP_VECATTR_EVENT_MT, + MRP_VECATTR_EVENT_LV, + __MRP_VECATTR_EVENT_MAX +}; + +struct mrp_skb_cb { + struct mrp_msg_hdr *mh; + struct mrp_vecattr_hdr *vah; + unsigned char attrvalue[]; +}; + +static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct mrp_skb_cb) > + FIELD_SIZEOF(struct sk_buff, cb)); + return (struct mrp_skb_cb *)skb->cb; +} + +enum mrp_applicant_state { + MRP_APPLICANT_INVALID, + MRP_APPLICANT_VO, + MRP_APPLICANT_VP, + MRP_APPLICANT_VN, + MRP_APPLICANT_AN, + MRP_APPLICANT_AA, + MRP_APPLICANT_QA, + MRP_APPLICANT_LA, + MRP_APPLICANT_AO, + MRP_APPLICANT_QO, + MRP_APPLICANT_AP, + MRP_APPLICANT_QP, + __MRP_APPLICANT_MAX +}; +#define MRP_APPLICANT_MAX (__MRP_APPLICANT_MAX - 1) + +enum mrp_event { + MRP_EVENT_NEW, + MRP_EVENT_JOIN, + MRP_EVENT_LV, + MRP_EVENT_TX, + MRP_EVENT_R_NEW, + MRP_EVENT_R_JOIN_IN, + MRP_EVENT_R_IN, + MRP_EVENT_R_JOIN_MT, + MRP_EVENT_R_MT, + MRP_EVENT_R_LV, + MRP_EVENT_R_LA, + MRP_EVENT_REDECLARE, + MRP_EVENT_PERIODIC, + __MRP_EVENT_MAX +}; +#define MRP_EVENT_MAX (__MRP_EVENT_MAX - 1) + +enum mrp_tx_action { + MRP_TX_ACTION_NONE, + MRP_TX_ACTION_S_NEW, + MRP_TX_ACTION_S_JOIN_IN, + MRP_TX_ACTION_S_JOIN_IN_OPTIONAL, + MRP_TX_ACTION_S_IN_OPTIONAL, + MRP_TX_ACTION_S_LV, +}; + +struct mrp_attr { + struct rb_node node; + enum mrp_applicant_state state; + u8 type; + u8 len; + unsigned char value[]; +}; + +enum mrp_applications { + MRP_APPLICATION_MVRP, + __MRP_APPLICATION_MAX +}; +#define MRP_APPLICATION_MAX (__MRP_APPLICATION_MAX - 1) + +struct mrp_application { + enum mrp_applications type; + unsigned int maxattr; + struct packet_type pkttype; + unsigned char group_address[ETH_ALEN]; + u8 version; +}; + +struct mrp_applicant { + struct mrp_application *app; + struct net_device *dev; + struct timer_list join_timer; + struct timer_list periodic_timer; + + spinlock_t lock; + struct sk_buff_head queue; + struct sk_buff *pdu; + struct rb_root mad; + struct rcu_head rcu; +}; + +struct mrp_port { + struct mrp_applicant __rcu *applicants[MRP_APPLICATION_MAX + 1]; + struct rcu_head rcu; +}; + +int mrp_register_application(struct mrp_application *app); +void mrp_unregister_application(struct mrp_application *app); + +int mrp_init_applicant(struct net_device *dev, struct mrp_application *app); +void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app); + +int mrp_request_join(const struct net_device *dev, + const struct mrp_application *app, + const void *value, u8 len, u8 type); +void mrp_request_leave(const struct net_device *dev, + const struct mrp_application *app, 
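Back in mld.h: the Maximum Response Code decode in mldv2_mrc() above has a twin for the QQIC field, for which this header only provides the MLDV2_QQIC_EXP/MAN macros. A sketch of the analogous decode plus one worked value (the mldv2_qqi() helper itself is illustrative, not part of the header):

static inline unsigned long mldv2_qqi(u8 qqic)
{
	/* RFC3810, 5.1.9: values below 128 are taken literally */
	if (qqic < 128)
		return qqic;

	/* otherwise QQI = (mant | 0x10) << (exp + 3), mirroring mldv2_mrc() */
	return (MLDV2_QQIC_MAN(qqic) | 0x10) << (MLDV2_QQIC_EXP(qqic) + 3);
}

/*
 * Worked example for the MRC path: mld2q_mrc = 0xA000 gives
 * exp = (0xA000 >> 12) & 0x7 = 2 and mant = 0, so mldv2_mrc() returns
 * (0x0000 | 0x1000) << (2 + 3) = 131072, i.e. roughly a 131 second
 * maximum response delay.
 */
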
+ const void *value, u8 len, u8 type); + +#endif /* _NET_MRP_H */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 59b70624b05..6bbda34d5e5 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -12,6 +12,15 @@ #define NDISC_REDIRECT 137 /* + * Router type: cross-layer information from link-layer to + * IPv6 layer reported by certain link types (e.g., RFC4214). + */ +#define NDISC_NODETYPE_UNSPEC 0 /* unspecified (default) */ +#define NDISC_NODETYPE_HOST 1 /* host or unauthorized router */ +#define NDISC_NODETYPE_NODEFAULT 2 /* non-default router */ +#define NDISC_NODETYPE_DEFAULT 3 /* default router */ + +/* * ndisc options */ @@ -25,6 +34,7 @@ enum { __ND_OPT_ARRAY_MAX, ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ ND_OPT_RDNSS = 25, /* RFC5006 */ + ND_OPT_DNSSL = 31, /* RFC6106 */ __ND_OPT_MAX }; @@ -33,20 +43,17 @@ enum { #define ND_REACHABLE_TIME (30*HZ) #define ND_RETRANS_TIMER HZ -#define ND_MIN_RANDOM_FACTOR (1/2) -#define ND_MAX_RANDOM_FACTOR (3/2) - -#ifdef __KERNEL__ - #include <linux/compiler.h> #include <linux/icmpv6.h> #include <linux/in6.h> #include <linux/types.h> +#include <linux/if_arp.h> +#include <linux/netdevice.h> +#include <linux/hash.h> #include <net/neighbour.h> struct ctl_table; -struct file; struct inet6_dev; struct net_device; struct net_proto_family; @@ -71,75 +78,162 @@ struct ra_msg { __be32 retrans_timer; }; +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; -} __attribute__((__packed__)); +} __packed; + +/* ND options */ +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX]; +#ifdef CONFIG_IPV6_ROUTE_INFO + struct nd_opt_hdr *nd_opts_ri; + struct nd_opt_hdr *nd_opts_ri_end; +#endif + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; +}; + +#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR] +#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR] +#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO] +#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END] +#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR] +#define nd_opts_mtu nd_opt_array[ND_OPT_MTU] +#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7) -extern int ndisc_init(struct net_proto_family *ops); +struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, + struct ndisc_options *ndopts); + +/* + * Return the padding between the option length and the start of the + * link addr. Currently only IP-over-InfiniBand needs this, although + * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may + * also need a pad of 2. 
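To put numbers on the option sizing helpers around this comment (the addr_len values below are the usual ones for these link types, not something this header defines): Ethernet uses addr_len 6 with no pad, so NDISC_OPT_SPACE(6 + 0) = (6 + 2 + 7) & ~7 = 8 octets, giving an option length field of 1. IP-over-InfiniBand uses addr_len 20 plus the pad of 2 described above, so NDISC_OPT_SPACE(20 + 2) = (22 + 2 + 7) & ~7 = 24 octets, giving an option length field of 3. That is exactly what ndisc_opt_addr_space() returns and what ndisc_opt_addr_data() checks against before handing back the link-layer address.
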
+ */ +static inline int ndisc_addr_option_pad(unsigned short type) +{ + switch (type) { + case ARPHRD_INFINIBAND: return 2; + default: return 0; + } +} + +static inline int ndisc_opt_addr_space(struct net_device *dev) +{ + return NDISC_OPT_SPACE(dev->addr_len + + ndisc_addr_option_pad(dev->type)); +} + +static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p, + struct net_device *dev) +{ + u8 *lladdr = (u8 *)(p + 1); + int lladdrlen = p->nd_opt_len << 3; + int prepad = ndisc_addr_option_pad(dev->type); + if (lladdrlen != ndisc_opt_addr_space(dev)) + return NULL; + return lladdr + prepad; +} -extern void ndisc_cleanup(void); +static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) +{ + const u32 *p32 = pkey; -extern int ndisc_rcv(struct sk_buff *skb); + return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) + + (p32[1] * hash_rnd[1]) + + (p32[2] * hash_rnd[2]) + + (p32[3] * hash_rnd[3])); +} -extern void ndisc_send_ns(struct net_device *dev, - struct neighbour *neigh, - struct in6_addr *solicit, - struct in6_addr *daddr, - struct in6_addr *saddr); +static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey) +{ + struct neigh_hash_table *nht; + const u32 *p32 = pkey; + struct neighbour *n; + u32 hash_val; + + nht = rcu_dereference_bh(nd_tbl.nht); + hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); + n != NULL; + n = rcu_dereference_bh(n->next)) { + u32 *n32 = (u32 *) n->primary_key; + if (n->dev == dev && + ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | + (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) + return n; + } -extern void ndisc_send_rs(struct net_device *dev, - struct in6_addr *saddr, - struct in6_addr *daddr); + return NULL; +} -extern void ndisc_forwarding_on(void); -extern void ndisc_forwarding_off(void); +static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey) +{ + struct neighbour *n; -extern void ndisc_send_redirect(struct sk_buff *skb, - struct neighbour *neigh, - struct in6_addr *target); + rcu_read_lock_bh(); + n = __ipv6_neigh_lookup_noref(dev, pkey); + if (n && !atomic_inc_not_zero(&n->refcnt)) + n = NULL; + rcu_read_unlock_bh(); -extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir); + return n; +} +int ndisc_init(void); +int ndisc_late_init(void); +void ndisc_late_cleanup(void); +void ndisc_cleanup(void); -/* - * IGMP - */ -extern int igmp6_init(struct net_proto_family *ops); +int ndisc_rcv(struct sk_buff *skb); -extern void igmp6_cleanup(void); +void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, + const struct in6_addr *solicit, + const struct in6_addr *daddr, const struct in6_addr *saddr); -extern int igmp6_event_query(struct sk_buff *skb); +void ndisc_send_rs(struct net_device *dev, + const struct in6_addr *saddr, const struct in6_addr *daddr); +void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, + const struct in6_addr *daddr, + const struct in6_addr *solicited_addr, + bool router, bool solicited, bool override, bool inc_opt); -extern int igmp6_event_report(struct sk_buff *skb); +void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target); -extern void igmp6_cleanup(void); +int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, + int dir); -#ifdef CONFIG_SYSCTL -extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, - int write, - struct file * filp, - void 
__user *buffer, - size_t *lenp, - loff_t *ppos); -#endif -extern void inet6_ifinfo_notify(int event, - struct inet6_dev *idev); +/* + * IGMP + */ +int igmp6_init(void); -static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, struct in6_addr *addr) -{ +void igmp6_cleanup(void); - if (dev) - return __neigh_lookup(&nd_tbl, addr, dev, 1); +int igmp6_event_query(struct sk_buff *skb); - return NULL; -} +int igmp6_event_report(struct sk_buff *skb); -#endif /* __KERNEL__ */ +#ifdef CONFIG_SYSCTL +int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen); +#endif +void inet6_ifinfo_notify(int event, struct inet6_dev *idev); #endif diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 64a5f0120b5..47f425464f8 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -16,14 +16,16 @@ * - Add neighbour cache statistics like rtstat */ -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> +#include <linux/bitmap.h> #include <linux/err.h> #include <linux/sysctl.h> +#include <linux/workqueue.h> #include <net/rtnetlink.h> /* @@ -36,9 +38,36 @@ struct neighbour; -struct neigh_parms -{ +enum { + NEIGH_VAR_MCAST_PROBES, + NEIGH_VAR_UCAST_PROBES, + NEIGH_VAR_APP_PROBES, + NEIGH_VAR_RETRANS_TIME, + NEIGH_VAR_BASE_REACHABLE_TIME, + NEIGH_VAR_DELAY_PROBE_TIME, + NEIGH_VAR_GC_STALETIME, + NEIGH_VAR_QUEUE_LEN_BYTES, + NEIGH_VAR_PROXY_QLEN, + NEIGH_VAR_ANYCAST_DELAY, + NEIGH_VAR_PROXY_DELAY, + NEIGH_VAR_LOCKTIME, +#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1) + /* Following are used as a second way to access one of the above */ + NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */ + NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */ + NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */ + /* Following are used by "default" only */ + NEIGH_VAR_GC_INTERVAL, + NEIGH_VAR_GC_THRESH1, + NEIGH_VAR_GC_THRESH2, + NEIGH_VAR_GC_THRESH3, + NEIGH_VAR_MAX +}; + +struct neigh_parms { +#ifdef CONFIG_NET_NS struct net *net; +#endif struct net_device *dev; struct neigh_parms *next; int (*neigh_setup)(struct neighbour *); @@ -51,29 +80,41 @@ struct neigh_parms atomic_t refcnt; struct rcu_head rcu_head; - int base_reachable_time; - int retrans_time; - int gc_staletime; int reachable_time; - int delay_probe_time; - - int queue_len; - int ucast_probes; - int app_probes; - int mcast_probes; - int anycast_delay; - int proxy_delay; - int proxy_qlen; - int locktime; + int data[NEIGH_VAR_DATA_MAX]; + DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX); }; -struct neigh_statistics +static inline void neigh_var_set(struct neigh_parms *p, int index, int val) +{ + set_bit(index, p->data_state); + p->data[index] = val; +} + +#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr]) + +/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used. + * In other cases, NEIGH_VAR_SET should be used. 
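A short usage sketch of the two macros defined just below; the mydev_* names and the chosen values are illustrative, only the macros and the ndo_neigh_setup calling convention come from the kernel headers:

static int mydev_neigh_setup(struct net_device *dev, struct neigh_parms *p)
{
	/* called via ndo_neigh_setup: plain assignment, no data_state bit yet */
	NEIGH_VAR_INIT(p, UCAST_PROBES, 4);
	NEIGH_VAR_INIT(p, MCAST_PROBES, 2);
	return 0;
}

static void mydev_tune_neigh(struct neigh_parms *p)
{
	/* any later change goes through NEIGH_VAR_SET so that neigh_var_set()
	 * also records the override in p->data_state */
	NEIGH_VAR_SET(p, RETRANS_TIME, 2 * HZ);
}
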
+ */ +#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val) +#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val) + +static inline void neigh_parms_data_state_setall(struct neigh_parms *p) { + bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX); +} + +static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p) +{ + bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX); +} + +struct neigh_statistics { unsigned long allocs; /* number of allocated neighs */ unsigned long destroys; /* number of destroyed neighs */ unsigned long hash_grows; /* number of hash resizes */ - unsigned long res_failed; /* nomber of failed resolutions */ + unsigned long res_failed; /* number of failed resolutions */ unsigned long lookups; /* number of lookups */ unsigned long hits; /* number of hits (among lookups) */ @@ -83,55 +124,52 @@ struct neigh_statistics unsigned long periodic_gc_runs; /* number of periodic GC runs */ unsigned long forced_gc_runs; /* number of forced GC runs */ + + unsigned long unres_discards; /* number of unresolved drops */ }; -#define NEIGH_CACHE_STAT_INC(tbl, field) \ - do { \ - preempt_disable(); \ - (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \ - preempt_enable(); \ - } while (0) +#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) -struct neighbour -{ - struct neighbour *next; +struct neighbour { + struct neighbour __rcu *next; struct neigh_table *tbl; struct neigh_parms *parms; - struct net_device *dev; - unsigned long used; unsigned long confirmed; unsigned long updated; + rwlock_t lock; + atomic_t refcnt; + struct sk_buff_head arp_queue; + unsigned int arp_queue_len_bytes; + struct timer_list timer; + unsigned long used; + atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; - atomic_t probes; - rwlock_t lock; + seqlock_t ha_lock; unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; - struct hh_cache *hh; - atomic_t refcnt; - int (*output)(struct sk_buff *skb); - struct sk_buff_head arp_queue; - struct timer_list timer; - struct neigh_ops *ops; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct rcu_head rcu; + struct net_device *dev; u8 primary_key[0]; }; -struct neigh_ops -{ +struct neigh_ops { int family; - void (*solicit)(struct neighbour *, struct sk_buff*); - void (*error_report)(struct neighbour *, struct sk_buff*); - int (*output)(struct sk_buff*); - int (*connected_output)(struct sk_buff*); - int (*hh_output)(struct sk_buff*); - int (*queue_xmit)(struct sk_buff*); + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); }; -struct pneigh_entry -{ +struct pneigh_entry { struct pneigh_entry *next; +#ifdef CONFIG_NET_NS struct net *net; +#endif struct net_device *dev; u8 flags; u8 key[0]; @@ -141,44 +179,59 @@ struct pneigh_entry * neighbour table manipulation */ +#define NEIGH_NUM_HASH_RND 4 -struct neigh_table -{ +struct neigh_hash_table { + struct neighbour __rcu **hash_buckets; + unsigned int hash_shift; + __u32 hash_rnd[NEIGH_NUM_HASH_RND]; + struct rcu_head rcu; +}; + + +struct neigh_table { struct neigh_table *next; int family; int entry_size; int key_len; - __u32 (*hash)(const void *pkey, const struct net_device *); + __u32 (*hash)(const void *pkey, + const struct net_device *dev, + __u32 *hash_rnd); int (*constructor)(struct 
neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *skb); char *id; struct neigh_parms parms; - /* HACK. gc_* shoul follow parms without a gap! */ int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; - struct timer_list gc_timer; + struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; - struct kmem_cache *kmem_cachep; - struct neigh_statistics *stats; - struct neighbour **hash_buckets; - unsigned int hash_mask; - __u32 hash_rnd; - unsigned int hash_chain_gc; + struct neigh_statistics __percpu *stats; + struct neigh_hash_table __rcu *nht; struct pneigh_entry **phash_buckets; -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *pde; -#endif }; +static inline int neigh_parms_family(struct neigh_parms *p) +{ + return p->tbl->family; +} + +#define NEIGH_PRIV_ALIGN sizeof(long long) +#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN) + +static inline void *neighbour_priv(const struct neighbour *n) +{ + return (char *)n + n->tbl->entry_size; +} + /* flags for neigh_update() */ #define NEIGH_UPDATE_F_OVERRIDE 0x00000001 #define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002 @@ -186,52 +239,73 @@ struct neigh_table #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 -extern void neigh_table_init(struct neigh_table *tbl); -extern void neigh_table_init_no_netlink(struct neigh_table *tbl); -extern int neigh_table_clear(struct neigh_table *tbl); -extern struct neighbour * neigh_lookup(struct neigh_table *tbl, - const void *pkey, - struct net_device *dev); -extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl, - struct net *net, - const void *pkey); -extern struct neighbour * neigh_create(struct neigh_table *tbl, +void neigh_table_init(struct neigh_table *tbl); +int neigh_table_clear(struct neigh_table *tbl); +struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, + struct net_device *dev); +struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, + const void *pkey); +struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, + struct net_device *dev, bool want_ref); +static inline struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, - struct net_device *dev); -extern void neigh_destroy(struct neighbour *neigh); -extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); -extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, - u32 flags); -extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); -extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); -extern int neigh_resolve_output(struct sk_buff *skb); -extern int neigh_connected_output(struct sk_buff *skb); -extern int neigh_compat_output(struct sk_buff *skb); -extern struct neighbour *neigh_event_ns(struct neigh_table *tbl, + struct net_device *dev) +{ + return __neigh_create(tbl, pkey, dev, true); +} +void neigh_destroy(struct neighbour *neigh); +int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); +int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags); +void __neigh_set_probe_once(struct neighbour *neigh); +void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); +int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); +int neigh_resolve_output(struct neighbour 
*neigh, struct sk_buff *skb); +int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); +struct neighbour *neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, struct net_device *dev); -extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl); -extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); -extern unsigned long neigh_rand_reach_time(unsigned long base); +struct neigh_parms *neigh_parms_alloc(struct net_device *dev, + struct neigh_table *tbl); +void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); -extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, - struct sk_buff *skb); -extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat); -extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, - struct net *net, - const void *key, - struct net_device *dev); -extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); +static inline +struct net *neigh_parms_net(const struct neigh_parms *parms) +{ + return read_pnet(&parms->net); +} + +unsigned long neigh_rand_reach_time(unsigned long base); -extern void neigh_app_ns(struct neighbour *n); -extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); -extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); -extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); +void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, + struct sk_buff *skb); +struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, + const void *key, struct net_device *dev, + int creat); +struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net, + const void *key, struct net_device *dev); +int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, + struct net_device *dev); + +static inline struct net *pneigh_net(const struct pneigh_entry *pneigh) +{ + return read_pnet(&pneigh->net); +} + +void neigh_app_ns(struct neighbour *n); +void neigh_for_each(struct neigh_table *tbl, + void (*cb)(struct neighbour *, void *), void *cookie); +void __neigh_for_each_release(struct neigh_table *tbl, + int (*cb)(struct neighbour *)); +void pneigh_for_each(struct neigh_table *tbl, + void (*cb)(struct pneigh_entry *)); struct neigh_seq_state { struct seq_net_private p; struct neigh_table *tbl; + struct neigh_hash_table *nht; void *(*neigh_sub_iter)(struct neigh_seq_state *state, struct neighbour *n, loff_t *pos); unsigned int bucket; @@ -240,17 +314,23 @@ struct neigh_seq_state { #define NEIGH_SEQ_IS_PNEIGH 0x00000002 #define NEIGH_SEQ_SKIP_NOARP 0x00000004 }; -extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int); -extern void *neigh_seq_next(struct seq_file *, void *, loff_t *); -extern void neigh_seq_stop(struct seq_file *, void *); - -extern int neigh_sysctl_register(struct net_device *dev, - struct neigh_parms *p, - int p_id, int pdev_id, - char *p_name, - proc_handler *proc_handler, - ctl_handler *strategy); -extern void neigh_sysctl_unregister(struct neigh_parms *p); +void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, + 
unsigned int); +void *neigh_seq_next(struct seq_file *, void *, loff_t *); +void neigh_seq_stop(struct seq_file *, void *); + +int neigh_proc_dointvec(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); + +int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, + proc_handler *proc_handler); +void neigh_sysctl_unregister(struct neigh_parms *p); static inline void __neigh_parms_put(struct neigh_parms *parms) { @@ -282,42 +362,51 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh) #define neigh_hold(n) atomic_inc(&(n)->refcnt) -static inline void neigh_confirm(struct neighbour *neigh) +static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { - if (neigh) - neigh->confirmed = jiffies; + unsigned long now = jiffies; + + if (neigh->used != now) + neigh->used = now; + if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) + return __neigh_event_send(neigh, skb); + return 0; } -static inline int neigh_is_connected(struct neighbour *neigh) +#ifdef CONFIG_BRIDGE_NETFILTER +static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) { - return neigh->nud_state&NUD_CONNECTED; -} - + unsigned int seq, hh_alen; -static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) -{ - neigh->used = jiffies; - if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) - return __neigh_event_send(neigh, skb); + do { + seq = read_seqbegin(&hh->hh_lock); + hh_alen = HH_DATA_ALIGN(ETH_HLEN); + memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN); + } while (read_seqretry(&hh->hh_lock, seq)); return 0; } +#endif -static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) +static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) { - unsigned seq; + unsigned int seq; int hh_len; do { - int hh_alen; - seq = read_seqbegin(&hh->hh_lock); hh_len = hh->hh_len; - hh_alen = HH_DATA_ALIGN(hh_len); - memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); + if (likely(hh_len <= HH_DATA_MOD)) { + /* this is inlined by gcc */ + memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); + } else { + int hh_alen = HH_DATA_ALIGN(hh_len); + + memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); + } } while (read_seqretry(&hh->hh_lock, seq)); skb_push(skb, hh_len); - return hh->hh_output(skb); + return dev_queue_xmit(skb); } static inline struct neighbour * @@ -353,4 +442,14 @@ struct neighbour_cb { #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) +static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, + const struct net_device *dev) +{ + unsigned int seq; + + do { + seq = read_seqbegin(&n->ha_lock); + memcpy(dst, n->ha, dev->addr_len); + } while (read_seqretry(&n->ha_lock, seq)); +} #endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 923f2b8b909..361d2607719 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -4,85 +4,171 @@ #ifndef __NET_NET_NAMESPACE_H #define __NET_NET_NAMESPACE_H -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/workqueue.h> #include <linux/list.h> +#include <linux/sysctl.h> +#include <net/flow.h> +#include <net/netns/core.h> +#include <net/netns/mib.h> #include 
<net/netns/unix.h> #include <net/netns/packet.h> #include <net/netns/ipv4.h> #include <net/netns/ipv6.h> +#include <net/netns/ieee802154_6lowpan.h> +#include <net/netns/sctp.h> +#include <net/netns/dccp.h> +#include <net/netns/netfilter.h> #include <net/netns/x_tables.h> +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +#include <net/netns/conntrack.h> +#endif +#include <net/netns/nftables.h> +#include <net/netns/xfrm.h> +struct user_namespace; struct proc_dir_entry; struct net_device; struct sock; struct ctl_table_header; +struct net_generic; +struct sock; +struct netns_ipvs; + + +#define NETDEV_HASHBITS 8 +#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) struct net { + atomic_t passive; /* To decided when the network + * namespace should be freed. + */ atomic_t count; /* To decided when the network - * namespace should be freed. + * namespace should be shut down. */ +#ifdef NETNS_REFCNT_DEBUG atomic_t use_count; /* To track references we * destroy on demand */ +#endif + spinlock_t rules_mod_lock; + struct list_head list; /* list of network namespaces */ - struct work_struct work; /* work struct for freeing */ + struct list_head cleanup_list; /* namespaces on death row */ + struct list_head exit_list; /* Use only net_mutex */ + + struct user_namespace *user_ns; /* Owning user namespace */ + + unsigned int proc_inum; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; - struct list_head sysctl_table_headers; +#ifdef CONFIG_SYSCTL + struct ctl_table_set sysctls; +#endif - struct net_device *loopback_dev; /* The loopback */ + struct sock *rtnl; /* rtnetlink socket */ + struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; + unsigned int dev_base_seq; /* protected by rtnl_mutex */ + int ifindex; + unsigned int dev_unreg_count; /* core fib_rules */ struct list_head rules_ops; - spinlock_t rules_mod_lock; - - struct sock *rtnl; /* rtnetlink socket */ - /* core sysctls */ - struct ctl_table_header *sysctl_core_hdr; - int sysctl_somaxconn; + struct net_device *loopback_dev; /* The loopback */ + struct netns_core core; + struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; #endif +#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) + struct netns_ieee802154_lowpan ieee802154_lowpan; +#endif +#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE) + struct netns_sctp sctp; +#endif +#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) + struct netns_dccp dccp; +#endif #ifdef CONFIG_NETFILTER + struct netns_nf nf; struct netns_xt xt; +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + struct netns_ct ct; +#endif +#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE) + struct netns_nftables nft; +#endif +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) + struct netns_nf_frag nf_frag; +#endif + struct sock *nfnl; + struct sock *nfnl_stash; #endif +#ifdef CONFIG_WEXT_CORE + struct sk_buff_head wext_nlevents; +#endif + struct net_generic __rcu *gen; + + /* Note : following structs are cache line aligned */ +#ifdef CONFIG_XFRM + struct netns_xfrm xfrm; +#endif +#if IS_ENABLED(CONFIG_IP_VS) + struct netns_ipvs *ipvs; +#endif + struct sock *diag_nlsk; + atomic_t fnhe_genid; }; -#ifdef CONFIG_NET +#include <linux/seq_file_net.h> + /* Init's network namespace */ extern struct net init_net; -#define 
INIT_NET_NS(net_ns) .net_ns = &init_net, -#else -#define INIT_NET_NS(net_ns) -#endif -extern struct list_head net_namespace_list; +#ifdef CONFIG_NET_NS +struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, + struct net *old_net); -#ifdef CONFIG_NET -extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns); -#else -static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns) +#else /* CONFIG_NET_NS */ +#include <linux/sched.h> +#include <linux/nsproxy.h> +static inline struct net *copy_net_ns(unsigned long flags, + struct user_namespace *user_ns, struct net *old_net) { - /* There is nothing to copy so this is a noop */ - return net_ns; + if (flags & CLONE_NEWNET) + return ERR_PTR(-EINVAL); + return old_net; } +#endif /* CONFIG_NET_NS */ + + +extern struct list_head net_namespace_list; + +struct net *get_net_ns_by_pid(pid_t pid); +struct net *get_net_ns_by_fd(int pid); + +#ifdef CONFIG_SYSCTL +void ipx_register_sysctl(void); +void ipx_unregister_sysctl(void); +#else +#define ipx_register_sysctl() +#define ipx_unregister_sysctl() #endif #ifdef CONFIG_NET_NS -extern void __put_net(struct net *net); +void __put_net(struct net *net); static inline struct net *get_net(struct net *net) { @@ -108,17 +194,16 @@ static inline void put_net(struct net *net) __put_net(net); } -static inline struct net *hold_net(struct net *net) +static inline +int net_eq(const struct net *net1, const struct net *net2) { - atomic_inc(&net->use_count); - return net; + return net1 == net2; } -static inline void release_net(struct net *net) -{ - atomic_dec(&net->use_count); -} +void net_drop_ns(void *); + #else + static inline struct net *get_net(struct net *net) { return net; @@ -128,50 +213,189 @@ static inline void put_net(struct net *net) { } +static inline struct net *maybe_get_net(struct net *net) +{ + return net; +} + +static inline +int net_eq(const struct net *net1, const struct net *net2) +{ + return 1; +} + +#define net_drop_ns NULL +#endif + + +#ifdef NETNS_REFCNT_DEBUG static inline struct net *hold_net(struct net *net) { + if (net) + atomic_inc(&net->use_count); return net; } static inline void release_net(struct net *net) { + if (net) + atomic_dec(&net->use_count); } - -static inline struct net *maybe_get_net(struct net *net) +#else +static inline struct net *hold_net(struct net *net) { return net; } + +static inline void release_net(struct net *net) +{ +} +#endif + +#ifdef CONFIG_NET_NS + +static inline void write_pnet(struct net **pnet, struct net *net) +{ + *pnet = net; +} + +static inline struct net *read_pnet(struct net * const *pnet) +{ + return *pnet; +} + +#else + +#define write_pnet(pnet, net) do { (void)(net);} while (0) +#define read_pnet(pnet) (&init_net) + #endif #define for_each_net(VAR) \ list_for_each_entry(VAR, &net_namespace_list, list) +#define for_each_net_rcu(VAR) \ + list_for_each_entry_rcu(VAR, &net_namespace_list, list) + #ifdef CONFIG_NET_NS #define __net_init #define __net_exit #define __net_initdata +#define __net_initconst #else #define __net_init __init #define __net_exit __exit_refok #define __net_initdata __initdata +#define __net_initconst __initconst #endif struct pernet_operations { struct list_head list; int (*init)(struct net *net); void (*exit)(struct net *net); + void (*exit_batch)(struct list_head *net_exit_list); + int *id; + size_t size; }; -extern int register_pernet_subsys(struct pernet_operations *); -extern void unregister_pernet_subsys(struct pernet_operations *); -extern int register_pernet_device(struct 
pernet_operations *); -extern void unregister_pernet_device(struct pernet_operations *); +/* + * Use these carefully. If you implement a network device and it + * needs per network namespace operations use device pernet operations, + * otherwise use pernet subsys operations. + * + * Network interfaces need to be removed from a dying netns _before_ + * subsys notifiers can be called, as most of the network code cleanup + * (which is done from subsys notifiers) runs with the assumption that + * dev_remove_pack has been called so no new packets will arrive during + * and after the cleanup functions have been called. dev_remove_pack + * is not per namespace so instead the guarantee of no more packets + * arriving in a network namespace is provided by ensuring that all + * network devices and all sockets have left the network namespace + * before the cleanup methods are called. + * + * For the longest time the ipv4 icmp code was registered as a pernet + * device which caused kernel oops, and panics during network + * namespace cleanup. So please don't get this wrong. + */ +int register_pernet_subsys(struct pernet_operations *); +void unregister_pernet_subsys(struct pernet_operations *); +int register_pernet_device(struct pernet_operations *); +void unregister_pernet_device(struct pernet_operations *); -struct ctl_path; struct ctl_table; struct ctl_table_header; -extern struct ctl_table_header *register_net_sysctl_table(struct net *net, - const struct ctl_path *path, struct ctl_table *table); -extern void unregister_net_sysctl_table(struct ctl_table_header *header); + +#ifdef CONFIG_SYSCTL +int net_sysctl_init(void); +struct ctl_table_header *register_net_sysctl(struct net *net, const char *path, + struct ctl_table *table); +void unregister_net_sysctl_table(struct ctl_table_header *header); +#else +static inline int net_sysctl_init(void) { return 0; } +static inline struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table) +{ + return NULL; +} +static inline void unregister_net_sysctl_table(struct ctl_table_header *header) +{ +} +#endif + +static inline int rt_genid_ipv4(struct net *net) +{ + return atomic_read(&net->ipv4.rt_genid); +} + +static inline void rt_genid_bump_ipv4(struct net *net) +{ + atomic_inc(&net->ipv4.rt_genid); +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline int rt_genid_ipv6(struct net *net) +{ + return atomic_read(&net->ipv6.rt_genid); +} + +static inline void rt_genid_bump_ipv6(struct net *net) +{ + atomic_inc(&net->ipv6.rt_genid); +} +#else +static inline int rt_genid_ipv6(struct net *net) +{ + return 0; +} + +static inline void rt_genid_bump_ipv6(struct net *net) +{ +} +#endif + +#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) +static inline struct netns_ieee802154_lowpan * +net_ieee802154_lowpan(struct net *net) +{ + return &net->ieee802154_lowpan; +} +#endif + +/* For callers who don't really care about whether it's IPv4 or IPv6 */ +static inline void rt_genid_bump_all(struct net *net) +{ + rt_genid_bump_ipv4(net); + rt_genid_bump_ipv6(net); +} + +static inline int fnhe_genid(struct net *net) +{ + return atomic_read(&net->fnhe_genid); +} + +static inline void fnhe_genid_bump(struct net *net) +{ + atomic_inc(&net->fnhe_genid); +} #endif /* __NET_NET_NAMESPACE_H */ diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h new file mode 100644 index 00000000000..7727b4247da --- /dev/null +++ b/include/net/net_ratelimit.h @@ -0,0 +1,8 @@ +#ifndef _LINUX_NET_RATELIMIT_H +#define _LINUX_NET_RATELIMIT_H + +#include 
<linux/ratelimit.h> + +extern struct ratelimit_state net_ratelimit_state; + +#endif /* _LINUX_NET_RATELIMIT_H */ diff --git a/include/net/netdma.h b/include/net/netdma.h index f28c6e064e8..8ba8ce284ee 100644 --- a/include/net/netdma.h +++ b/include/net/netdma.h @@ -24,17 +24,6 @@ #include <linux/dmaengine.h> #include <linux/skbuff.h> -static inline struct dma_chan *get_softnet_dma(void) -{ - struct dma_chan *chan; - rcu_read_lock(); - chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma); - if (chan) - dma_chan_get(chan); - rcu_read_unlock(); - return chan; -} - int dma_skb_copy_datagram_iovec(struct dma_chan* chan, struct sk_buff *skb, int offset, struct iovec *to, size_t len, struct dma_pinned_list *pinned_list); diff --git a/include/net/netevent.h b/include/net/netevent.h index e82b7bab3ff..d8bbb38584b 100644 --- a/include/net/netevent.h +++ b/include/net/netevent.h @@ -10,24 +10,24 @@ * * Changes: */ -#ifdef __KERNEL__ struct dst_entry; +struct neighbour; struct netevent_redirect { struct dst_entry *old; struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; }; enum netevent_notif_type { NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */ - NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */ NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */ }; -extern int register_netevent_notifier(struct notifier_block *nb); -extern int unregister_netevent_notifier(struct notifier_block *nb); -extern int call_netevent_notifiers(unsigned long val, void *v); +int register_netevent_notifier(struct notifier_block *nb); +int unregister_netevent_notifier(struct notifier_block *nb); +int call_netevent_notifiers(unsigned long val, void *v); #endif -#endif diff --git a/include/net/netfilter/ipv4/nf_conntrack_icmp.h b/include/net/netfilter/ipv4/nf_conntrack_icmp.h deleted file mode 100644 index 3dd22cff23e..00000000000 --- a/include/net/netfilter/ipv4/nf_conntrack_icmp.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _NF_CONNTRACK_ICMP_H -#define _NF_CONNTRACK_ICMP_H -/* ICMP tracking. */ -#include <asm/atomic.h> - -struct ip_ct_icmp -{ - /* Optimization: when number in == number out, forget immediately. 
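The registration rules spelled out in the pernet_operations comment further up (in net_namespace.h) are easiest to see in a skeleton per-namespace subsystem. Everything named foo_* below is illustrative, and net_generic() is assumed to come from <net/netns/generic.h> rather than from the headers shown here:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {
	int peak_entries;	/* per-namespace state for the subsystem */
};

static int foo_net_id __read_mostly;

static int __net_init foo_net_init(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->peak_entries = 0;
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* tear down whatever foo_net_init() set up for this namespace */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

static int __init foo_module_init(void)
{
	/* not a network device, so subsys (not device) registration applies */
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_module_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}
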
*/ - atomic_t count; -}; -#endif /* _NF_CONNTRACK_ICMP_H */ diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 9bf059817ae..981c327374d 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -9,8 +9,6 @@ #ifndef _NF_CONNTRACK_IPV4_H #define _NF_CONNTRACK_IPV4_H -/* Returns new sk_buff, or NULL */ -struct sk_buff *nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb); extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; @@ -18,9 +16,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; -extern int nf_conntrack_ipv4_compat_init(void); -extern void nf_conntrack_ipv4_compat_fini(void); - -extern void need_ipv4_conntrack(void); +int nf_conntrack_ipv4_compat_init(void); +void nf_conntrack_ipv4_compat_fini(void); #endif /*_NF_CONNTRACK_IPV4_H*/ diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h new file mode 100644 index 00000000000..f01ef208dff --- /dev/null +++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h @@ -0,0 +1,6 @@ +#ifndef _NF_DEFRAG_IPV4_H +#define _NF_DEFRAG_IPV4_H + +void nf_defrag_ipv4_enable(void); + +#endif /* _NF_DEFRAG_IPV4_H */ diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h new file mode 100644 index 00000000000..931fbf81217 --- /dev/null +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -0,0 +1,128 @@ +#ifndef _IPV4_NF_REJECT_H +#define _IPV4_NF_REJECT_H + +#include <net/ip.h> +#include <net/tcp.h> +#include <net/route.h> +#include <net/dst.h> + +static inline void nf_send_unreach(struct sk_buff *skb_in, int code) +{ + icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); +} + +/* Send RST reply */ +static void nf_send_reset(struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + const struct iphdr *oiph; + struct iphdr *niph; + const struct tcphdr *oth; + struct tcphdr _otcph, *tcph; + + /* IP header checks: fragment. */ + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) + return; + + oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), + sizeof(_otcph), &_otcph); + if (oth == NULL) + return; + + /* No RST for RST. 
*/ + if (oth->rst) + return; + + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) + return; + + /* Check checksum */ + if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) + return; + oiph = ip_hdr(oldskb); + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + + skb_reset_network_header(nskb); + niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); + niph->version = 4; + niph->ihl = sizeof(struct iphdr) / 4; + niph->tos = 0; + niph->id = 0; + niph->frag_off = htons(IP_DF); + niph->protocol = IPPROTO_TCP; + niph->check = 0; + niph->saddr = oiph->daddr; + niph->daddr = oiph->saddr; + + skb_reset_transport_header(nskb); + tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); + memset(tcph, 0, sizeof(*tcph)); + tcph->source = oth->dest; + tcph->dest = oth->source; + tcph->doff = sizeof(struct tcphdr) / 4; + + if (oth->ack) + tcph->seq = oth->ack_seq; + else { + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + + oldskb->len - ip_hdrlen(oldskb) - + (oth->doff << 2)); + tcph->ack = 1; + } + + tcph->rst = 1; + tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, + niph->daddr, 0); + nskb->ip_summed = CHECKSUM_PARTIAL; + nskb->csum_start = (unsigned char *)tcph - nskb->head; + nskb->csum_offset = offsetof(struct tcphdr, check); + + /* ip_route_me_harder expects skb->dst to be set */ + skb_dst_set_noref(nskb, skb_dst(oldskb)); + + nskb->protocol = htons(ETH_P_IP); + if (ip_route_me_harder(nskb, RTN_UNSPEC)) + goto free_nskb; + + niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); + + /* "Never happens" */ + if (nskb->len > dst_mtu(skb_dst(nskb))) + goto free_nskb; + + nf_ct_attach(nskb, oldskb); + +#ifdef CONFIG_BRIDGE_NETFILTER + /* If we use ip_local_out for bridged traffic, the MAC source on + * the RST will be ours, instead of the destination's. This confuses + * some routers/firewalls, and they drop the packet. So we need to + * build the eth header using the original destination's MAC as the + * source, and send the RST packet directly. + */ + if (oldskb->nf_bridge) { + struct ethhdr *oeth = eth_hdr(oldskb); + nskb->dev = oldskb->nf_bridge->physindev; + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), + oeth->h_source, oeth->h_dest, nskb->len) < 0) + goto free_nskb; + dev_queue_xmit(nskb); + } else +#endif + ip_local_out(nskb); + + return; + + free_nskb: + kfree_skb(nskb); +} + + +#endif /* _IPV4_NF_REJECT_H */ diff --git a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h index 86591afda29..67edd50a398 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h @@ -9,7 +9,6 @@ #ifndef _NF_CONNTRACK_ICMPV6_H #define _NF_CONNTRACK_ICMPV6_H -#include <asm/atomic.h> #ifndef ICMPV6_NI_QUERY #define ICMPV6_NI_QUERY 139 @@ -18,10 +17,4 @@ #define ICMPV6_NI_REPLY 140 #endif -struct nf_ct_icmpv6 -{ - /* Optimization: when number in == number out, forget immediately. 
*/ - atomic_t count; -}; - #endif /* _NF_CONNTRACK_ICMPV6_H */ diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index abc55ad75c2..a4c99368579 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -7,16 +7,6 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; -extern int nf_ct_frag6_init(void); -extern void nf_ct_frag6_cleanup(void); -extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb); -extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, - struct net_device *in, - struct net_device *out, - int (*okfn)(struct sk_buff *)); - -struct inet_frags_ctl; - #include <linux/sysctl.h> extern struct ctl_table nf_ct_ipv6_sysctl_table[]; diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h new file mode 100644 index 00000000000..27666d8a0bd --- /dev/null +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -0,0 +1,13 @@ +#ifndef _NF_DEFRAG_IPV6_H +#define _NF_DEFRAG_IPV6_H + +void nf_defrag_ipv6_enable(void); + +int nf_ct_frag6_init(void); +void nf_ct_frag6_cleanup(void); +struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); +void nf_ct_frag6_consume_orig(struct sk_buff *skb); + +struct inet_frags_ctl; + +#endif /* _NF_DEFRAG_IPV6_H */ diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h new file mode 100644 index 00000000000..710d17ed70b --- /dev/null +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -0,0 +1,171 @@ +#ifndef _IPV6_NF_REJECT_H +#define _IPV6_NF_REJECT_H + +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/ip6_fib.h> +#include <net/ip6_checksum.h> +#include <linux/netfilter_ipv6.h> + +static inline void +nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, + unsigned int hooknum) +{ + if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) + skb_in->dev = net->loopback_dev; + + icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); +} + +/* Send RST reply */ +static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + struct tcphdr otcph, *tcph; + unsigned int otcplen, hh_len; + int tcphoff, needs_ack; + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); + struct ipv6hdr *ip6h; +#define DEFAULT_TOS_VALUE 0x0U + const __u8 tclass = DEFAULT_TOS_VALUE; + struct dst_entry *dst = NULL; + u8 proto; + __be16 frag_off; + struct flowi6 fl6; + + if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || + (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { + pr_debug("addr is not unicast.\n"); + return; + } + + proto = oip6h->nexthdr; + tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); + + if ((tcphoff < 0) || (tcphoff > oldskb->len)) { + pr_debug("Cannot get TCP header.\n"); + return; + } + + otcplen = oldskb->len - tcphoff; + + /* IP header checks: fragment, too short. */ + if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { + pr_debug("proto(%d) != IPPROTO_TCP, " + "or too short. otcplen = %d\n", + proto, otcplen); + return; + } + + if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) + BUG(); + + /* No RST for RST. */ + if (otcph.rst) { + pr_debug("RST is set\n"); + return; + } + + /* Check checksum. 
*/ + if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { + pr_debug("TCP checksum is invalid\n"); + return; + } + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_proto = IPPROTO_TCP; + fl6.saddr = oip6h->daddr; + fl6.daddr = oip6h->saddr; + fl6.fl6_sport = otcph.dest; + fl6.fl6_dport = otcph.source; + security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); + dst = ip6_route_output(net, NULL, &fl6); + if (dst == NULL || dst->error) { + dst_release(dst); + return; + } + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); + if (IS_ERR(dst)) + return; + + hh_len = (dst->dev->hard_header_len + 15)&~15; + nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) + + sizeof(struct tcphdr) + dst->trailer_len, + GFP_ATOMIC); + + if (!nskb) { + net_dbg_ratelimited("cannot alloc skb\n"); + dst_release(dst); + return; + } + + skb_dst_set(nskb, dst); + + skb_reserve(nskb, hh_len + dst->header_len); + + skb_put(nskb, sizeof(struct ipv6hdr)); + skb_reset_network_header(nskb); + ip6h = ipv6_hdr(nskb); + ip6_flow_hdr(ip6h, tclass, 0); + ip6h->hop_limit = ip6_dst_hoplimit(dst); + ip6h->nexthdr = IPPROTO_TCP; + ip6h->saddr = oip6h->daddr; + ip6h->daddr = oip6h->saddr; + + skb_reset_transport_header(nskb); + tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); + /* Truncate to length (no data) */ + tcph->doff = sizeof(struct tcphdr)/4; + tcph->source = otcph.dest; + tcph->dest = otcph.source; + + if (otcph.ack) { + needs_ack = 0; + tcph->seq = otcph.ack_seq; + tcph->ack_seq = 0; + } else { + needs_ack = 1; + tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin + + otcplen - (otcph.doff<<2)); + tcph->seq = 0; + } + + /* Reset flags */ + ((u_int8_t *)tcph)[13] = 0; + tcph->rst = 1; + tcph->ack = needs_ack; + tcph->window = 0; + tcph->urg_ptr = 0; + tcph->check = 0; + + /* Adjust TCP checksum */ + tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, + &ipv6_hdr(nskb)->daddr, + sizeof(struct tcphdr), IPPROTO_TCP, + csum_partial(tcph, + sizeof(struct tcphdr), 0)); + + nf_ct_attach(nskb, oldskb); + +#ifdef CONFIG_BRIDGE_NETFILTER + /* If we use ip6_local_out for bridged traffic, the MAC source on + * the RST will be ours, instead of the destination's. This confuses + * some routers/firewalls, and they drop the packet. So we need to + * build the eth header using the original destination's MAC as the + * source, and send the RST packet directly. 
+ */ + if (oldskb->nf_bridge) { + struct ethhdr *oeth = eth_hdr(oldskb); + nskb->dev = oldskb->nf_bridge->physindev; + nskb->protocol = htons(ETH_P_IPV6); + ip6h->payload_len = htons(sizeof(struct tcphdr)); + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), + oeth->h_source, oeth->h_dest, nskb->len) < 0) + return; + dev_queue_xmit(nskb); + } else +#endif + ip6_local_out(nskb); +} + +#endif /* _IPV6_NF_REJECT_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 90b3e7f5df5..37252f71a38 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -14,15 +14,14 @@ #include <linux/netfilter/nf_conntrack_common.h> -#ifdef __KERNEL__ #include <linux/bitops.h> #include <linux/compiler.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/netfilter/nf_conntrack_tcp.h> +#include <linux/netfilter/nf_conntrack_dccp.h> #include <linux/netfilter/nf_conntrack_sctp.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> -#include <net/netfilter/ipv4/nf_conntrack_icmp.h> #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> #include <net/netfilter/nf_conntrack_tuple.h> @@ -30,10 +29,9 @@ /* per conntrack: protocol private data */ union nf_conntrack_proto { /* insert conntrack proto private data here */ + struct nf_ct_dccp dccp; struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; - struct ip_ct_icmp icmp; - struct nf_ct_icmpv6 icmpv6; struct nf_ct_gre gre; }; @@ -41,63 +39,52 @@ union nf_conntrack_expect_proto { /* insert expect proto private data here */ }; -/* Add protocol helper include file here */ -#include <linux/netfilter/nf_conntrack_ftp.h> -#include <linux/netfilter/nf_conntrack_pptp.h> -#include <linux/netfilter/nf_conntrack_h323.h> -#include <linux/netfilter/nf_conntrack_sane.h> - -/* per conntrack: application helper private data */ -union nf_conntrack_help { - /* insert conntrack helper private data (master) here */ - struct nf_ct_ftp_master ct_ftp_info; - struct nf_ct_pptp_master ct_pptp_info; - struct nf_ct_h323_master ct_h323_info; - struct nf_ct_sane_master ct_sane_info; -}; - #include <linux/types.h> #include <linux/skbuff.h> #include <linux/timer.h> #ifdef CONFIG_NETFILTER_DEBUG -#define NF_CT_ASSERT(x) \ -do { \ - if (!(x)) \ - /* Wooah! I'm tripping my conntrack in a frenzy of \ - netplay... */ \ - printk("NF_CT_ASSERT: %s:%i(%s)\n", \ - __FILE__, __LINE__, __FUNCTION__); \ -} while(0) +#define NF_CT_ASSERT(x) WARN_ON(!(x)) #else #define NF_CT_ASSERT(x) #endif struct nf_conntrack_helper; +/* Must be kept in sync with the classes defined by helpers */ +#define NF_CT_MAX_EXPECT_CLASSES 4 + /* nf_conn feature for connections that have a helper */ struct nf_conn_help { /* Helper. if any */ - struct nf_conntrack_helper *helper; - - union nf_conntrack_help help; + struct nf_conntrack_helper __rcu *helper; struct hlist_head expectations; /* Current number of expected connections */ - unsigned int expecting; -}; + u8 expecting[NF_CT_MAX_EXPECT_CLASSES]; + /* private helper information. */ + char data[]; +}; #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> -struct nf_conn -{ +struct nf_conn { /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, - plus 1 for any connection(s) we are `master' for */ + * plus 1 for any connection(s) we are `master' for + * + * Hint, SKB address this struct and refcnt via skb->nfct and + * helpers nf_conntrack_get() and nf_conntrack_put(). 
+ * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt, + * beware nf_ct_get() is different and don't inc refcnt. + */ struct nf_conntrack ct_general; + spinlock_t lock; + u16 cpu; + /* XXX should I move this to the tail ? - Y.K */ /* These are my tuples; original and reply */ struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; @@ -111,11 +98,6 @@ struct nf_conn /* Timer function; drops refcnt when it goes off. */ struct timer_list timeout; -#ifdef CONFIG_NF_CT_ACCT - /* Accounting Information (same cache line as other written members) */ - struct ip_conntrack_counter counters[IP_CT_DIR_MAX]; -#endif - #if defined(CONFIG_NF_CONNTRACK_MARK) u_int32_t mark; #endif @@ -124,13 +106,14 @@ struct nf_conn u_int32_t secmark; #endif - /* Storage reserved for other modules: */ - union nf_conntrack_proto proto; - /* Extensions */ struct nf_ct_ext *ext; +#ifdef CONFIG_NET_NS + struct net *ct_net; +#endif - struct rcu_head rcu; + /* Storage reserved for other modules, must be the last member */ + union nf_conntrack_proto proto; }; static inline struct nf_conn * @@ -140,19 +123,36 @@ nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash) tuplehash[hash->tuple.dst.dir]); } +static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct) +{ + return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; +} + +static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct) +{ + return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; +} + +#define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple) + /* get master conntrack via master expectation */ #define master_ct(conntr) (conntr->master) +extern struct net init_net; + +static inline struct net *nf_ct_net(const struct nf_conn *ct) +{ + return read_pnet(&ct->ct_net); +} + /* Alter reply tuple (maybe alter helper). */ -extern void -nf_conntrack_alter_reply(struct nf_conn *ct, - const struct nf_conntrack_tuple *newreply); +void nf_conntrack_alter_reply(struct nf_conn *ct, + const struct nf_conntrack_tuple *newreply); /* Is this tuple taken? (ignoring any belonging to the given conntrack). */ -extern int -nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, - const struct nf_conn *ignored_conntrack); +int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, + const struct nf_conn *ignored_conntrack); /* Return conntrack_info and tuple hash for given skb. 
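The accessor that this comment introduces (its body is cut off by the hunk boundary below) is the usual entry point to the helpers above. A typical consumer might look like the following, where the foo_* wrapper is purely illustrative and nf_ct_get()'s conventional two-argument signature is assumed:

#include <net/netfilter/nf_conntrack.h>

static bool foo_skb_is_established_tcp(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);		/* assumed signature */
	if (!ct || nf_ct_is_untracked(ct))
		return false;

	/* nf_ct_protonum()/nf_ct_l3num() read the original-direction tuple */
	return nf_ct_protonum(ct) == IPPROTO_TCP &&
	       ctinfo == IP_CT_ESTABLISHED;
}
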
*/ static inline struct nf_conn * @@ -170,32 +170,34 @@ static inline void nf_ct_put(struct nf_conn *ct) } /* Protocol module loading */ -extern int nf_ct_l3proto_try_module_get(unsigned short l3proto); -extern void nf_ct_l3proto_module_put(unsigned short l3proto); +int nf_ct_l3proto_try_module_get(unsigned short l3proto); +void nf_ct_l3proto_module_put(unsigned short l3proto); + +/* + * Allocate a hashtable of hlist_head (if nulls == 0), + * or hlist_nulls_head (if nulls == 1) + */ +void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls); -extern struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced); -extern void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, - unsigned int size); +void nf_ct_free_hashtable(void *hash, unsigned int size); -extern struct nf_conntrack_tuple_hash * -__nf_conntrack_find(const struct nf_conntrack_tuple *tuple); +struct nf_conntrack_tuple_hash * +__nf_conntrack_find(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple); -extern void nf_conntrack_hash_insert(struct nf_conn *ct); +int nf_conntrack_hash_check_insert(struct nf_conn *ct); +bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); -extern void nf_conntrack_flush(void); +void nf_conntrack_flush_report(struct net *net, u32 portid, int report); -extern int nf_ct_get_tuplepr(const struct sk_buff *skb, - unsigned int nhoff, - u_int16_t l3num, - struct nf_conntrack_tuple *tuple); -extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig); +bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, + u_int16_t l3num, struct nf_conntrack_tuple *tuple); +bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, + const struct nf_conntrack_tuple *orig); -extern void __nf_ct_refresh_acct(struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - unsigned long extra_jiffies, - int do_acct); +void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, + unsigned long extra_jiffies, int do_acct); /* Refresh conntrack for this many jiffies and do accounting */ static inline void nf_ct_refresh_acct(struct nf_conn *ct, @@ -214,23 +216,50 @@ static inline void nf_ct_refresh(struct nf_conn *ct, __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0); } +bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, int do_acct); + +/* kill conntrack and do accounting */ +static inline bool nf_ct_kill_acct(struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + const struct sk_buff *skb) +{ + return __nf_ct_kill_acct(ct, ctinfo, skb, 1); +} + +/* kill conntrack without accounting */ +static inline bool nf_ct_kill(struct nf_conn *ct) +{ + return __nf_ct_kill_acct(ct, 0, NULL, 0); +} + /* These are for NAT. Icky. */ -/* Update TCP window tracking data when NAT mangles the packet */ -extern void nf_conntrack_tcp_update(const struct sk_buff *skb, - unsigned int dataoff, - struct nf_conn *ct, - int dir); +extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq); /* Fake conntrack entry for untracked connections */ -extern struct nf_conn nf_conntrack_untracked; +DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); +static inline struct nf_conn *nf_ct_untracked_get(void) +{ + return &__raw_get_cpu_var(nf_conntrack_untracked); +} +void nf_ct_untracked_status_or(unsigned long bits); /* Iterate over all conntracks: if iter returns true, it's deleted. 
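 *
 * A hedged sketch of a caller, assuming a callback of the iterator type
 * declared just below (the callback name is illustrative and net is the
 * caller's namespace):
 *
 *	static int kill_all(struct nf_conn *i, void *data)
 *	{
 *		return 1;	flag every entry for deletion
 *	}
 *
 *	nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);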
*/ -extern void -nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data); -extern void nf_conntrack_free(struct nf_conn *ct); -extern struct nf_conn * -nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_tuple *repl); +void nf_ct_iterate_cleanup(struct net *net, + int (*iter)(struct nf_conn *i, void *data), + void *data, u32 portid, int report); +void nf_conntrack_free(struct nf_conn *ct); +struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, + const struct nf_conntrack_tuple *orig, + const struct nf_conntrack_tuple *repl, + gfp_t gfp); + +static inline int nf_ct_is_template(const struct nf_conn *ct) +{ + return test_bit(IPS_TEMPLATE_BIT, &ct->status); +} /* It's confirmed if it is, or has been in the hash table. */ static inline int nf_ct_is_confirmed(struct nf_conn *ct) @@ -243,25 +272,31 @@ static inline int nf_ct_is_dying(struct nf_conn *ct) return test_bit(IPS_DYING_BIT, &ct->status); } -static inline int nf_ct_is_untracked(const struct sk_buff *skb) +static inline int nf_ct_is_untracked(const struct nf_conn *ct) +{ + return test_bit(IPS_UNTRACKED_BIT, &ct->status); +} + +/* Packet is received from loopback */ +static inline bool nf_is_loopback_packet(const struct sk_buff *skb) { - return (skb->nfct == &nf_conntrack_untracked.ct_general); + return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; } -extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); +struct kernel_param; + +int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); extern unsigned int nf_conntrack_htable_size; -extern int nf_conntrack_checksum; -extern atomic_t nf_conntrack_count; -extern int nf_conntrack_max; - -DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); -#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++) -#define NF_CT_STAT_INC_ATOMIC(count) \ -do { \ - local_bh_disable(); \ - __get_cpu_var(nf_conntrack_stat).count++; \ - local_bh_enable(); \ -} while (0) - -#endif /* __KERNEL__ */ +extern unsigned int nf_conntrack_max; +extern unsigned int nf_conntrack_hash_rnd; +void init_nf_conntrack_hash_rnd(void); + +void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); + +#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) +#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) + +#define MODULE_ALIAS_NFCT_HELPER(helper) \ + MODULE_ALIAS("nfct-helper-" helper) + #endif /* _NF_CONNTRACK_H */ diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h new file mode 100644 index 00000000000..79d8d16732b --- /dev/null +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -0,0 +1,69 @@ +/* + * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _NF_CONNTRACK_ACCT_H +#define _NF_CONNTRACK_ACCT_H +#include <net/net_namespace.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_extend.h> + +struct nf_conn_counter { + atomic64_t packets; + atomic64_t bytes; +}; + +struct nf_conn_acct { + struct nf_conn_counter counter[IP_CT_DIR_MAX]; +}; + +static inline +struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct) +{ + return nf_ct_ext_find(ct, NF_CT_EXT_ACCT); +} + +static inline +struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) +{ + struct net *net = nf_ct_net(ct); + struct nf_conn_acct *acct; + + if (!net->ct.sysctl_acct) + return NULL; + + acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp); + if (!acct) + pr_debug("failed to add accounting extension area"); + + + return acct; +}; + +unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, + int dir); + +/* Check if connection tracking accounting is enabled */ +static inline bool nf_ct_acct_enabled(struct net *net) +{ + return net->ct.sysctl_acct != 0; +} + +/* Enable/disable connection tracking accounting */ +static inline void nf_ct_set_acct(struct net *net, bool enable) +{ + net->ct.sysctl_acct = enable; +} + +int nf_conntrack_acct_pernet_init(struct net *net); +void nf_conntrack_acct_pernet_fini(struct net *net); + +int nf_conntrack_acct_init(void); +void nf_conntrack_acct_fini(void); +#endif /* _NF_CONNTRACK_ACCT_H */ diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 9ee26469c75..cc0c1882760 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -20,37 +20,42 @@ /* This header is used to share core functionality between the standalone connection tracking module, and the compatibility layer's use of connection tracking. 
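 *
 * As an illustrative sketch of that shared lookup path, built only from the
 * declarations below plus nf_ct_tuplehash_to_ctrack()/nf_ct_put() from
 * nf_conntrack.h (tuple and net are assumed to come from the caller):
 *
 *	struct nf_conntrack_tuple_hash *h;
 *	struct nf_conn *ct;
 *
 *	h = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, &tuple);
 *	if (h != NULL) {
 *		ct = nf_ct_tuplehash_to_ctrack(h);
 *		...			the lookup took a reference
 *		nf_ct_put(ct);
 *	}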
*/ -extern unsigned int nf_conntrack_in(int pf, - unsigned int hooknum, - struct sk_buff *skb); - -extern int nf_conntrack_init(void); -extern void nf_conntrack_cleanup(void); - -extern int nf_conntrack_proto_init(void); -extern void nf_conntrack_proto_fini(void); - -extern int -nf_ct_get_tuple(const struct sk_buff *skb, - unsigned int nhoff, - unsigned int dataoff, - u_int16_t l3num, - u_int8_t protonum, - struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto, - const struct nf_conntrack_l4proto *l4proto); - -extern int -nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_l3proto *l3proto, - const struct nf_conntrack_l4proto *l4proto); +unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, + struct sk_buff *skb); + +int nf_conntrack_init_net(struct net *net); +void nf_conntrack_cleanup_net(struct net *net); +void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list); + +int nf_conntrack_proto_pernet_init(struct net *net); +void nf_conntrack_proto_pernet_fini(struct net *net); + +int nf_conntrack_proto_init(void); +void nf_conntrack_proto_fini(void); + +int nf_conntrack_init_start(void); +void nf_conntrack_cleanup_start(void); + +void nf_conntrack_init_end(void); +void nf_conntrack_cleanup_end(void); + +bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, + unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, + struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_l3proto *l3proto, + const struct nf_conntrack_l4proto *l4proto); + +bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, + const struct nf_conntrack_tuple *orig, + const struct nf_conntrack_l3proto *l3proto, + const struct nf_conntrack_l4proto *l4proto); /* Find a connection corresponding to a tuple. */ -extern struct nf_conntrack_tuple_hash * -nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple); +struct nf_conntrack_tuple_hash * +nf_conntrack_find_get(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple); -extern int __nf_conntrack_confirm(struct sk_buff *skb); +int __nf_conntrack_confirm(struct sk_buff *skb); /* Confirm a connection: returns NF_DROP if packet must be dropped. 
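 *
 * Illustrative call pattern (sketch only; the surrounding hook function is
 * hypothetical and elided):
 *
 *	verdict = nf_conntrack_confirm(skb);
 *	if (verdict != NF_ACCEPT)
 *		return verdict;		e.g. NF_DROP, packet is discarded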
*/ static inline int nf_conntrack_confirm(struct sk_buff *skb) @@ -58,10 +63,11 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) struct nf_conn *ct = (struct nf_conn *)skb->nfct; int ret = NF_ACCEPT; - if (ct) { - if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) + if (ct && !nf_ct_is_untracked(ct)) { + if (!nf_ct_is_confirmed(ct)) ret = __nf_conntrack_confirm(skb); - nf_ct_deliver_cached_events(ct); + if (likely(ret == NF_ACCEPT)) + nf_ct_deliver_cached_events(ct); } return ret; } @@ -71,8 +77,13 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *proto); -extern struct hlist_head *nf_conntrack_hash; -extern spinlock_t nf_conntrack_lock ; -extern struct hlist_head unconfirmed; +#ifdef CONFIG_LOCKDEP +# define CONNTRACK_LOCKS 8 +#else +# define CONNTRACK_LOCKS 1024 +#endif +extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; + +extern spinlock_t nf_conntrack_expect_lock; #endif /* _NF_CONNTRACK_CORE_H */ diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index f0b9078235c..0e3d08e4b1d 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -6,70 +6,255 @@ #define _NF_CONNTRACK_ECACHE_H #include <net/netfilter/nf_conntrack.h> -#include <linux/notifier.h> -#include <linux/interrupt.h> +#include <net/net_namespace.h> #include <net/netfilter/nf_conntrack_expect.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <net/netfilter/nf_conntrack_extend.h> -#ifdef CONFIG_NF_CONNTRACK_EVENTS struct nf_conntrack_ecache { + unsigned long cache; /* bitops want long */ + unsigned long missed; /* missed events */ + u16 ctmask; /* bitmask of ct events to be delivered */ + u16 expmask; /* bitmask of expect events to be delivered */ + u32 portid; /* netlink portid of destroyer */ + struct timer_list timeout; +}; + +static inline struct nf_conntrack_ecache * +nf_ct_ecache_find(const struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_EVENTS + return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE); +#else + return NULL; +#endif +} + +static inline struct nf_conntrack_ecache * +nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp) +{ +#ifdef CONFIG_NF_CONNTRACK_EVENTS + struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *e; + + if (!ctmask && !expmask && net->ct.sysctl_events) { + ctmask = ~0; + expmask = ~0; + } + if (!ctmask && !expmask) + return NULL; + + e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp); + if (e) { + e->ctmask = ctmask; + e->expmask = expmask; + } + return e; +#else + return NULL; +#endif +}; + +#ifdef CONFIG_NF_CONNTRACK_EVENTS +/* This structure is passed to event handler */ +struct nf_ct_event { struct nf_conn *ct; - unsigned int events; + u32 portid; + int report; }; -DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); -#define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) +struct nf_ct_event_notifier { + int (*fcn)(unsigned int events, struct nf_ct_event *item); +}; -extern struct atomic_notifier_head nf_conntrack_chain; -extern int nf_conntrack_register_notifier(struct notifier_block *nb); -extern int nf_conntrack_unregister_notifier(struct notifier_block *nb); +int nf_conntrack_register_notifier(struct net *net, + struct nf_ct_event_notifier *nb); +void nf_conntrack_unregister_notifier(struct net *net, + struct nf_ct_event_notifier *nb); -extern void 
nf_ct_deliver_cached_events(const struct nf_conn *ct); -extern void __nf_ct_event_cache_init(struct nf_conn *ct); -extern void nf_ct_event_cache_flush(void); +void nf_ct_deliver_cached_events(struct nf_conn *ct); static inline void -nf_conntrack_event_cache(enum ip_conntrack_events event, - const struct sk_buff *skb) +nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) { - struct nf_conn *ct = (struct nf_conn *)skb->nfct; - struct nf_conntrack_ecache *ecache; - - local_bh_disable(); - ecache = &__get_cpu_var(nf_conntrack_ecache); - if (ct != ecache->ct) - __nf_ct_event_cache_init(ct); - ecache->events |= event; - local_bh_enable(); + struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *e; + + if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) + return; + + e = nf_ct_ecache_find(ct); + if (e == NULL) + return; + + set_bit(event, &e->cache); } -static inline void nf_conntrack_event(enum ip_conntrack_events event, - struct nf_conn *ct) +static inline int +nf_conntrack_eventmask_report(unsigned int eventmask, + struct nf_conn *ct, + u32 portid, + int report) { - if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) - atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); + int ret = 0; + struct net *net = nf_ct_net(ct); + struct nf_ct_event_notifier *notify; + struct nf_conntrack_ecache *e; + + rcu_read_lock(); + notify = rcu_dereference(net->ct.nf_conntrack_event_cb); + if (notify == NULL) + goto out_unlock; + + e = nf_ct_ecache_find(ct); + if (e == NULL) + goto out_unlock; + + if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { + struct nf_ct_event item = { + .ct = ct, + .portid = e->portid ? e->portid : portid, + .report = report + }; + /* This is a resent of a destroy event? If so, skip missed */ + unsigned long missed = e->portid ? 0 : e->missed; + + if (!((eventmask | missed) & e->ctmask)) + goto out_unlock; + + ret = notify->fcn(eventmask | missed, &item); + if (unlikely(ret < 0 || missed)) { + spin_lock_bh(&ct->lock); + if (ret < 0) { + /* This is a destroy event that has been + * triggered by a process, we store the PORTID + * to include it in the retransmission. 
*/ + if (eventmask & (1 << IPCT_DESTROY) && + e->portid == 0 && portid != 0) + e->portid = portid; + else + e->missed |= eventmask; + } else + e->missed &= ~missed; + spin_unlock_bh(&ct->lock); + } + } +out_unlock: + rcu_read_unlock(); + return ret; } -extern struct atomic_notifier_head nf_ct_expect_chain; -extern int nf_ct_expect_register_notifier(struct notifier_block *nb); -extern int nf_ct_expect_unregister_notifier(struct notifier_block *nb); +static inline int +nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, + u32 portid, int report) +{ + return nf_conntrack_eventmask_report(1 << event, ct, portid, report); +} + +static inline int +nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) +{ + return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); +} + +struct nf_exp_event { + struct nf_conntrack_expect *exp; + u32 portid; + int report; +}; + +struct nf_exp_event_notifier { + int (*fcn)(unsigned int events, struct nf_exp_event *item); +}; + +int nf_ct_expect_register_notifier(struct net *net, + struct nf_exp_event_notifier *nb); +void nf_ct_expect_unregister_notifier(struct net *net, + struct nf_exp_event_notifier *nb); + +static inline void +nf_ct_expect_event_report(enum ip_conntrack_expect_events event, + struct nf_conntrack_expect *exp, + u32 portid, + int report) +{ + struct net *net = nf_ct_exp_net(exp); + struct nf_exp_event_notifier *notify; + struct nf_conntrack_ecache *e; + + rcu_read_lock(); + notify = rcu_dereference(net->ct.nf_expect_event_cb); + if (notify == NULL) + goto out_unlock; + + e = nf_ct_ecache_find(exp->master); + if (e == NULL) + goto out_unlock; + + if (e->expmask & (1 << event)) { + struct nf_exp_event item = { + .exp = exp, + .portid = portid, + .report = report + }; + notify->fcn(1 << event, &item); + } +out_unlock: + rcu_read_unlock(); +} static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp) { - atomic_notifier_call_chain(&nf_ct_expect_chain, event, exp); + nf_ct_expect_event_report(event, exp, 0, 0); } +int nf_conntrack_ecache_pernet_init(struct net *net); +void nf_conntrack_ecache_pernet_fini(struct net *net); + +int nf_conntrack_ecache_init(void); +void nf_conntrack_ecache_fini(void); #else /* CONFIG_NF_CONNTRACK_EVENTS */ static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, - const struct sk_buff *skb) {} -static inline void nf_conntrack_event(enum ip_conntrack_events event, - struct nf_conn *ct) {} + struct nf_conn *ct) {} +static inline int nf_conntrack_eventmask_report(unsigned int eventmask, + struct nf_conn *ct, + u32 portid, + int report) { return 0; } +static inline int nf_conntrack_event(enum ip_conntrack_events event, + struct nf_conn *ct) { return 0; } +static inline int nf_conntrack_event_report(enum ip_conntrack_events event, + struct nf_conn *ct, + u32 portid, + int report) { return 0; } static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp) {} -static inline void nf_ct_event_cache_flush(void) {} +static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, + struct nf_conntrack_expect *exp, + u32 portid, + int report) {} + +static inline int nf_conntrack_ecache_pernet_init(struct net *net) +{ + return 0; +} + +static inline void nf_conntrack_ecache_pernet_fini(struct net *net) +{ +} + +static inline int nf_conntrack_ecache_init(void) +{ + return 0; +} + +static 
inline void nf_conntrack_ecache_fini(void) +{ +} #endif /* CONFIG_NF_CONNTRACK_EVENTS */ #endif /*_NF_CONNTRACK_ECACHE_H*/ diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index cb608a1b44e..3f3aecbc863 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -6,12 +6,10 @@ #define _NF_CONNTRACK_EXPECT_H #include <net/netfilter/nf_conntrack.h> -extern struct hlist_head *nf_ct_expect_hash; extern unsigned int nf_ct_expect_hsize; extern unsigned int nf_ct_expect_max; -struct nf_conntrack_expect -{ +struct nf_conntrack_expect { /* Conntrack expectation list member */ struct hlist_node lnode; @@ -41,8 +39,11 @@ struct nf_conntrack_expect /* Flags */ unsigned int flags; + /* Expectation class */ + unsigned int class; + #ifdef CONFIG_NF_NAT_NEEDED - __be32 saved_ip; + union nf_inet_addr saved_addr; /* This is the original per-proto part, used to map the * expected connection the way the recipient expects. */ union nf_conntrack_man_proto saved_proto; @@ -53,33 +54,63 @@ struct nf_conntrack_expect struct rcu_head rcu; }; -#define NF_CT_EXPECT_PERMANENT 0x1 +static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp) +{ + return nf_ct_net(exp->master); +} + +#define NF_CT_EXP_POLICY_NAME_LEN 16 + +struct nf_conntrack_expect_policy { + unsigned int max_expected; + unsigned int timeout; + char name[NF_CT_EXP_POLICY_NAME_LEN]; +}; + +#define NF_CT_EXPECT_CLASS_DEFAULT 0 + +int nf_conntrack_expect_pernet_init(struct net *net); +void nf_conntrack_expect_pernet_fini(struct net *net); int nf_conntrack_expect_init(void); void nf_conntrack_expect_fini(void); struct nf_conntrack_expect * -__nf_ct_expect_find(const struct nf_conntrack_tuple *tuple); +__nf_ct_expect_find(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple); struct nf_conntrack_expect * -nf_ct_expect_find_get(const struct nf_conntrack_tuple *tuple); +nf_ct_expect_find_get(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple); struct nf_conntrack_expect * -nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple); +nf_ct_find_expectation(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple); + +void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, + u32 portid, int report); +static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) +{ + nf_ct_unlink_expect_report(exp, 0, 0); +} -void nf_ct_unlink_expect(struct nf_conntrack_expect *exp); void nf_ct_remove_expectations(struct nf_conn *ct); void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); /* Allocate space for an expectation: this is mandatory before calling nf_ct_expect_related. You will have to call put afterwards. 
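 *
 * A minimal sketch of the alloc/init/related/put sequence (hypothetical
 * helper code; saddr, daddr and port are assumed to be filled in by the
 * caller, the class and l3num values come from the declarations above):
 *
 *	struct nf_conntrack_expect *exp = nf_ct_expect_alloc(ct);
 *
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &saddr, &daddr, IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);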
*/ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me); -void nf_ct_expect_init(struct nf_conntrack_expect *, int, - union nf_inet_addr *, - union nf_inet_addr *, - u_int8_t, __be16 *, __be16 *); +void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t, + const union nf_inet_addr *, + const union nf_inet_addr *, + u_int8_t, const __be16 *, const __be16 *); void nf_ct_expect_put(struct nf_conntrack_expect *exp); -int nf_ct_expect_related(struct nf_conntrack_expect *expect); +int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, + u32 portid, int report); +static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect) +{ + return nf_ct_expect_related_report(expect, 0, 0); +} #endif /*_NF_CONNTRACK_EXPECT_H*/ diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index f736e842977..55d15049ab2 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -1,28 +1,65 @@ #ifndef _NF_CONNTRACK_EXTEND_H #define _NF_CONNTRACK_EXTEND_H +#include <linux/slab.h> + #include <net/netfilter/nf_conntrack.h> -enum nf_ct_ext_id -{ +enum nf_ct_ext_id { NF_CT_EXT_HELPER, +#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) NF_CT_EXT_NAT, +#endif + NF_CT_EXT_SEQADJ, + NF_CT_EXT_ACCT, +#ifdef CONFIG_NF_CONNTRACK_EVENTS + NF_CT_EXT_ECACHE, +#endif +#ifdef CONFIG_NF_CONNTRACK_ZONES + NF_CT_EXT_ZONE, +#endif +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + NF_CT_EXT_TSTAMP, +#endif +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + NF_CT_EXT_TIMEOUT, +#endif +#ifdef CONFIG_NF_CONNTRACK_LABELS + NF_CT_EXT_LABELS, +#endif +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + NF_CT_EXT_SYNPROXY, +#endif NF_CT_EXT_NUM, }; #define NF_CT_EXT_HELPER_TYPE struct nf_conn_help #define NF_CT_EXT_NAT_TYPE struct nf_conn_nat +#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj +#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct +#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache +#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone +#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp +#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout +#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels +#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy /* Extensions: optional stuff which isn't permanently in struct. */ struct nf_ct_ext { - u8 offset[NF_CT_EXT_NUM]; - u8 len; + struct rcu_head rcu; + u16 offset[NF_CT_EXT_NUM]; + u16 len; char data[0]; }; -static inline int nf_ct_ext_exist(const struct nf_conn *ct, u8 id) +static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id) +{ + return !!ext->offset[id]; +} + +static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id) { - return (ct->ext && ct->ext->offset[id]); + return (ct->ext && __nf_ct_ext_exist(ct->ext, id)); } static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) @@ -36,7 +73,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) ((id##_TYPE *)__nf_ct_ext_find((ext), (id))) /* Destroy all relationships */ -extern void __nf_ct_ext_destroy(struct nf_conn *ct); +void __nf_ct_ext_destroy(struct nf_conn *ct); static inline void nf_ct_ext_destroy(struct nf_conn *ct) { if (ct->ext) @@ -49,19 +86,21 @@ static inline void nf_ct_ext_destroy(struct nf_conn *ct) static inline void nf_ct_ext_free(struct nf_conn *ct) { if (ct->ext) - kfree(ct->ext); + kfree_rcu(ct->ext, rcu); } /* Add this type, returns pointer to data or NULL. 
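 *
 * A short illustration matching the NF_CT_EXT_ACCT_TYPE mapping above (the
 * same pattern the accounting extension helpers use):
 *
 *	struct nf_conn_acct *acct;
 *
 *	acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, GFP_ATOMIC);
 *	if (acct == NULL)
 *		...			allocation failed, no extension attached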
*/ -void * -__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp); +void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, + size_t var_alloc_len, gfp_t gfp); + #define nf_ct_ext_add(ct, id, gfp) \ - ((id##_TYPE *)__nf_ct_ext_add((ct), (id), (gfp))) + ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), 0, (gfp))) +#define nf_ct_ext_add_length(ct, id, len, gfp) \ + ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), (len), (gfp))) #define NF_CT_EXT_F_PREALLOC 0x0001 -struct nf_ct_ext_type -{ +struct nf_ct_ext_type { /* Destroys relationships (can be NULL). */ void (*destroy)(struct nf_conn *ct); /* Called when realloacted (can be NULL). diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 4ca125e9b3c..6cf614bc002 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -11,18 +11,26 @@ #define _NF_CONNTRACK_HELPER_H #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_extend.h> +#include <net/netfilter/nf_conntrack_expect.h> struct module; -struct nf_conntrack_helper -{ +enum nf_ct_helper_flags { + NF_CT_HELPER_F_USERSPACE = (1 << 0), + NF_CT_HELPER_F_CONFIGURED = (1 << 1), +}; + +#define NF_CT_HELPER_NAME_LEN 16 + +struct nf_conntrack_helper { struct hlist_node hnode; /* Internal use. */ - const char *name; /* name of the module */ + char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */ struct module *me; /* pointer to self */ - unsigned int max_expected; /* Maximum number of concurrent - * expected connections */ - unsigned int timeout; /* timeout for expecteds */ + const struct nf_conntrack_expect_policy *expect_policy; + + /* length of internal data, ie. sizeof(struct nf_ct_*_master) */ + size_t data_len; /* Tuple of things we will help (compared against server response) */ struct nf_conntrack_tuple tuple; @@ -36,26 +44,76 @@ struct nf_conntrack_helper void (*destroy)(struct nf_conn *ct); + int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct); int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct); + unsigned int expect_class_max; + + unsigned int flags; + unsigned int queue_num; /* For user-space helpers. 
*/ }; -extern struct nf_conntrack_helper * -__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple); +struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name, + u16 l3num, u8 protonum); + +struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name, + u16 l3num, + u8 protonum); + +int nf_conntrack_helper_register(struct nf_conntrack_helper *); +void nf_conntrack_helper_unregister(struct nf_conntrack_helper *); -extern struct nf_conntrack_helper * -__nf_conntrack_helper_find_byname(const char *name); +struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, + struct nf_conntrack_helper *helper, + gfp_t gfp); -extern int nf_conntrack_helper_register(struct nf_conntrack_helper *); -extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *); +int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, + gfp_t flags); -extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp); +void nf_ct_helper_destroy(struct nf_conn *ct); static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) { return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); } -extern int nf_conntrack_helper_init(void); -extern void nf_conntrack_helper_fini(void); +static inline void *nfct_help_data(const struct nf_conn *ct) +{ + struct nf_conn_help *help; + + help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER); + + return (void *)help->data; +} + +int nf_conntrack_helper_pernet_init(struct net *net); +void nf_conntrack_helper_pernet_fini(struct net *net); + +int nf_conntrack_helper_init(void); +void nf_conntrack_helper_fini(void); + +int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int timeout); + +struct nf_ct_helper_expectfn { + struct list_head head; + const char *name; + void (*expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp); +}; + +__printf(3,4) +void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, + const char *fmt, ...); + +void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n); +void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n); +struct nf_ct_helper_expectfn * +nf_ct_helper_expectfn_find_by_name(const char *name); +struct nf_ct_helper_expectfn * +nf_ct_helper_expectfn_find_by_symbol(const void *symbol); + +extern struct hlist_head *nf_ct_helper_hash; +extern unsigned int nf_ct_helper_hsize; #endif /*_NF_CONNTRACK_HELPER_H*/ diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index b886e3ae6ca..adc1fa3dd7a 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -16,8 +16,7 @@ #include <linux/seq_file.h> #include <net/netfilter/nf_conntrack.h> -struct nf_conntrack_l3proto -{ +struct nf_conntrack_l3proto { /* L3 Protocol Family number. ex) PF_INET */ u_int16_t l3proto; @@ -28,31 +27,20 @@ struct nf_conntrack_l3proto * Try to fill in the third arg: nhoff is offset of l3 proto * hdr. Return true if possible. */ - int (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff, - struct nf_conntrack_tuple *tuple); + bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff, + struct nf_conntrack_tuple *tuple); /* * Invert the per-proto part of the tuple: ie. turn xmit into reply. * Some packets can't be inverted: return 0 in that case. 
*/ - int (*invert_tuple)(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig); + bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, + const struct nf_conntrack_tuple *orig); /* Print out the per-protocol part of the tuple. */ int (*print_tuple)(struct seq_file *s, const struct nf_conntrack_tuple *); - /* Returns verdict for packet, or -1 for invalid. */ - int (*packet)(struct nf_conn *ct, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo); - - /* - * Called when a new connection for this protocol found; - * returns TRUE if it's OK. If so, packet() called next. - */ - int (*new)(struct nf_conn *ct, const struct sk_buff *skb); - /* * Called before tracking. * *dataoff: offset of protocol header (TCP, UDP,...) in skb @@ -64,27 +52,41 @@ struct nf_conntrack_l3proto int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); + /* + * Calculate size of tuple nlattr + */ + int (*nlattr_tuple_size)(void); + int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; + size_t nla_size; + #ifdef CONFIG_SYSCTL - struct ctl_table_header *ctl_table_header; - struct ctl_path *ctl_table_path; - struct ctl_table *ctl_table; + const char *ctl_table_path; #endif /* CONFIG_SYSCTL */ + /* Init l3proto pernet data */ + int (*init_net)(struct net *net); + /* Module (if any) which this is connected to. */ struct module *me; }; -extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX]; +extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX]; + +/* Protocol pernet registration. */ +int nf_ct_l3proto_pernet_register(struct net *net, + struct nf_conntrack_l3proto *proto); +void nf_ct_l3proto_pernet_unregister(struct net *net, + struct nf_conntrack_l3proto *proto); + +/* Protocol global registration. */ +int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto); +void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto); -/* Protocol registration. */ -extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto); -extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto); -extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto); -extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p); +struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto); /* Existing built-in protocols */ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic; diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index efc16eccddb..4c8d573830b 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -12,11 +12,11 @@ #include <linux/netlink.h> #include <net/netlink.h> #include <net/netfilter/nf_conntrack.h> +#include <net/netns/generic.h> struct seq_file; -struct nf_conntrack_l4proto -{ +struct nf_conntrack_l4proto { /* L3 Protocol number. */ u_int16_t l3proto; @@ -25,65 +25,83 @@ struct nf_conntrack_l4proto /* Try to fill in the third arg: dataoff is offset past network protocol hdr. Return true if possible. */ - int (*pkt_to_tuple)(const struct sk_buff *skb, - unsigned int dataoff, - struct nf_conntrack_tuple *tuple); + bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, + struct nf_conntrack_tuple *tuple); /* Invert the per-proto part of the tuple: ie. turn xmit into reply. * Some packets can't be inverted: return 0 in that case. 
*/ - int (*invert_tuple)(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig); + bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, + const struct nf_conntrack_tuple *orig); /* Returns verdict for packet, or -1 for invalid. */ int (*packet)(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, - int pf, - unsigned int hooknum); + u_int8_t pf, + unsigned int hooknum, + unsigned int *timeouts); /* Called when a new connection for this protocol found; * returns TRUE if it's OK. If so, packet() called next. */ - int (*new)(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff); + bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, + unsigned int dataoff, unsigned int *timeouts); /* Called when a conntrack entry is destroyed */ void (*destroy)(struct nf_conn *ct); - int (*error)(struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, - int pf, unsigned int hooknum); + int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, + unsigned int dataoff, enum ip_conntrack_info *ctinfo, + u_int8_t pf, unsigned int hooknum); /* Print out the per-protocol part of the tuple. Return like seq_* */ int (*print_tuple)(struct seq_file *s, const struct nf_conntrack_tuple *); /* Print out the private part of the conntrack. */ - int (*print_conntrack)(struct seq_file *s, const struct nf_conn *); + int (*print_conntrack)(struct seq_file *s, struct nf_conn *); + + /* Return the array of timeouts for this protocol. */ + unsigned int *(*get_timeouts)(struct net *net); /* convert protoinfo to nfnetink attributes */ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, - const struct nf_conn *ct); + struct nf_conn *ct); + /* Calculate protoinfo nlattr size */ + int (*nlattr_size)(void); /* convert nfnetlink attributes to protoinfo */ int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); + /* Calculate tuple nlattr size */ + int (*nlattr_tuple_size)(void); int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; -#ifdef CONFIG_SYSCTL - struct ctl_table_header **ctl_table_header; - struct ctl_table *ctl_table; - unsigned int *ctl_table_users; -#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT - struct ctl_table_header *ctl_compat_table_header; - struct ctl_table *ctl_compat_table; -#endif + size_t nla_size; + +#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) + struct { + size_t obj_size; + int (*nlattr_to_obj)(struct nlattr *tb[], + struct net *net, void *data); + int (*obj_to_nlattr)(struct sk_buff *skb, const void *data); + + unsigned int nlattr_max; + const struct nla_policy *nla_policy; + } ctnl_timeout; #endif + int *net_id; + /* Init l4proto pernet data */ + int (*init_net)(struct net *net, u_int16_t proto); + + /* Return the per-net protocol part. 
*/ + struct nf_proto_net *(*get_net_proto)(struct net *net); + /* Protocol name */ const char *name; @@ -91,47 +109,50 @@ struct nf_conntrack_l4proto struct module *me; }; -/* Existing built-in protocols */ -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; +/* Existing built-in generic protocol */ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; #define MAX_NF_CT_PROTO 256 -extern struct nf_conntrack_l4proto * -__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto); +struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto, + u_int8_t l4proto); -extern struct nf_conntrack_l4proto * -nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t protocol); +struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto, + u_int8_t l4proto); +void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p); -extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p); +/* Protocol pernet registration. */ +int nf_ct_l4proto_pernet_register(struct net *net, + struct nf_conntrack_l4proto *proto); +void nf_ct_l4proto_pernet_unregister(struct net *net, + struct nf_conntrack_l4proto *proto); -/* Protocol registration. */ -extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto); -extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto); +/* Protocol global registration. */ +int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto); +void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto); + +static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn) +{ +#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) + kfree(pn->ctl_compat_table); + pn->ctl_compat_table = NULL; +#endif +} /* Generic netlink helpers */ -extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple); -extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], - struct nf_conntrack_tuple *t); +int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple); +int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], + struct nf_conntrack_tuple *t); +int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; -/* Log invalid packets */ -extern unsigned int nf_ct_log_invalid; - #ifdef CONFIG_SYSCTL -#ifdef DEBUG_INVALID_PACKETS -#define LOG_INVALID(proto) \ - (nf_ct_log_invalid == (proto) || nf_ct_log_invalid == IPPROTO_RAW) -#else -#define LOG_INVALID(proto) \ - ((nf_ct_log_invalid == (proto) || nf_ct_log_invalid == IPPROTO_RAW) \ - && net_ratelimit()) -#endif +#define LOG_INVALID(net, proto) \ + ((net)->ct.sysctl_log_invalid == (proto) || \ + (net)->ct.sysctl_log_invalid == IPPROTO_RAW) #else -#define LOG_INVALID(proto) 0 +static inline int LOG_INVALID(struct net *net, int proto) { return 0; } #endif /* CONFIG_SYSCTL */ #endif /*_NF_CONNTRACK_PROTOCOL_H*/ diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h new file mode 100644 index 00000000000..dec6336bf85 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -0,0 +1,60 @@ +#include <linux/types.h> +#include <net/net_namespace.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_extend.h> + +#include <uapi/linux/netfilter/xt_connlabel.h> + 
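/*
 * A hedged usage sketch relying only on the helpers declared further down in
 * this header: attach the label area to a tracked connection, set one label
 * bit and test it again (ct and bit are assumed to come from the caller).
 *
 *	struct nf_conn_labels *labels = nf_ct_labels_ext_add(ct);
 *
 *	if (labels != NULL && nf_connlabel_set(ct, bit) == 0)
 *		matched = nf_connlabel_match(ct, bit);
 */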
+#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE) + +struct nf_conn_labels { + u8 words; + unsigned long bits[]; +}; + +static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_LABELS + return nf_ct_ext_find(ct, NF_CT_EXT_LABELS); +#else + return NULL; +#endif +} + +static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_LABELS + struct nf_conn_labels *cl_ext; + struct net *net = nf_ct_net(ct); + u8 words; + + words = ACCESS_ONCE(net->ct.label_words); + if (words == 0) + return NULL; + + cl_ext = nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, + words * sizeof(long), GFP_ATOMIC); + if (cl_ext != NULL) + cl_ext->words = words; + + return cl_ext; +#else + return NULL; +#endif +} + +bool nf_connlabel_match(const struct nf_conn *ct, u16 bit); +int nf_connlabel_set(struct nf_conn *ct, u16 bit); + +int nf_connlabels_replace(struct nf_conn *ct, + const u32 *data, const u32 *mask, unsigned int words); + +#ifdef CONFIG_NF_CONNTRACK_LABELS +int nf_conntrack_labels_init(void); +void nf_conntrack_labels_fini(void); +#else +static inline int nf_conntrack_labels_init(void) { return 0; } +static inline void nf_conntrack_labels_fini(void) {} +#endif diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h new file mode 100644 index 00000000000..4b3362991a2 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_seqadj.h @@ -0,0 +1,47 @@ +#ifndef _NF_CONNTRACK_SEQADJ_H +#define _NF_CONNTRACK_SEQADJ_H + +#include <net/netfilter/nf_conntrack_extend.h> + +/** + * struct nf_ct_seqadj - sequence number adjustment information + * + * @correction_pos: position of the last TCP sequence number modification + * @offset_before: sequence number offset before last modification + * @offset_after: sequence number offset after last modification + */ +struct nf_ct_seqadj { + u32 correction_pos; + s32 offset_before; + s32 offset_after; +}; + +struct nf_conn_seqadj { + struct nf_ct_seqadj seq[IP_CT_DIR_MAX]; +}; + +static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct) +{ + return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ); +} + +static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct) +{ + return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC); +} + +int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + s32 off); +int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + __be32 seq, s32 off); +void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, s32 off); + +int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, unsigned int protoff); +s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq); + +int nf_conntrack_seqadj_init(void); +void nf_conntrack_seqadj_fini(void); + +#endif /* _NF_CONNTRACK_SEQADJ_H */ diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h new file mode 100644 index 00000000000..6793614e650 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_synproxy.h @@ -0,0 +1,75 @@ +#ifndef _NF_CONNTRACK_SYNPROXY_H +#define _NF_CONNTRACK_SYNPROXY_H + +#include <net/netns/generic.h> + +struct nf_conn_synproxy { + u32 isn; + u32 its; + u32 tsoff; +}; + +static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct) +{ +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + return nf_ct_ext_find(ct, 
NF_CT_EXT_SYNPROXY); +#else + return NULL; +#endif +} + +static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct) +{ +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC); +#else + return NULL; +#endif +} + +struct synproxy_stats { + unsigned int syn_received; + unsigned int cookie_invalid; + unsigned int cookie_valid; + unsigned int cookie_retrans; + unsigned int conn_reopened; +}; + +struct synproxy_net { + struct nf_conn *tmpl; + struct synproxy_stats __percpu *stats; +}; + +extern int synproxy_net_id; +static inline struct synproxy_net *synproxy_pernet(struct net *net) +{ + return net_generic(net, synproxy_net_id); +} + +struct synproxy_options { + u8 options; + u8 wscale; + u16 mss; + u32 tsval; + u32 tsecr; +}; + +struct tcphdr; +struct xt_synproxy_info; +bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, + const struct tcphdr *th, + struct synproxy_options *opts); +unsigned int synproxy_options_size(const struct synproxy_options *opts); +void synproxy_build_options(struct tcphdr *th, + const struct synproxy_options *opts); + +void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info, + struct synproxy_options *opts); +void synproxy_check_timestamp_cookie(struct synproxy_options *opts); + +unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff, + struct tcphdr *th, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + const struct nf_conn_synproxy *synproxy); + +#endif /* _NF_CONNTRACK_SYNPROXY_H */ diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h new file mode 100644 index 00000000000..62308713dd7 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -0,0 +1,98 @@ +#ifndef _NF_CONNTRACK_TIMEOUT_H +#define _NF_CONNTRACK_TIMEOUT_H + +#include <net/net_namespace.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_extend.h> + +#define CTNL_TIMEOUT_NAME_MAX 32 + +struct ctnl_timeout { + struct list_head head; + struct rcu_head rcu_head; + atomic_t refcnt; + char name[CTNL_TIMEOUT_NAME_MAX]; + __u16 l3num; + struct nf_conntrack_l4proto *l4proto; + char data[0]; +}; + +struct nf_conn_timeout { + struct ctnl_timeout *timeout; +}; + +#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data) + +static inline +struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT); +#else + return NULL; +#endif +} + +static inline +struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, + struct ctnl_timeout *timeout, + gfp_t gfp) +{ +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + struct nf_conn_timeout *timeout_ext; + + timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp); + if (timeout_ext == NULL) + return NULL; + + timeout_ext->timeout = timeout; + + return timeout_ext; +#else + return NULL; +#endif +}; + +static inline unsigned int * +nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct, + struct nf_conntrack_l4proto *l4proto) +{ +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + struct nf_conn_timeout *timeout_ext; + unsigned int *timeouts; + + timeout_ext = nf_ct_timeout_find(ct); + if (timeout_ext) + timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); + else + timeouts = l4proto->get_timeouts(net); + + return timeouts; +#else + return l4proto->get_timeouts(net); 
+#endif +} + +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT +int nf_conntrack_timeout_init(void); +void nf_conntrack_timeout_fini(void); +#else +static inline int nf_conntrack_timeout_init(void) +{ + return 0; +} + +static inline void nf_conntrack_timeout_fini(void) +{ + return; +} +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ + +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT +extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(const char *name); +extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout); +#endif + +#endif /* _NF_CONNTRACK_TIMEOUT_H */ diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h new file mode 100644 index 00000000000..300ae2209f2 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_timestamp.h @@ -0,0 +1,78 @@ +#ifndef _NF_CONNTRACK_TSTAMP_H +#define _NF_CONNTRACK_TSTAMP_H + +#include <net/net_namespace.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_extend.h> + +struct nf_conn_tstamp { + u_int64_t start; + u_int64_t stop; +}; + +static inline +struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP); +#else + return NULL; +#endif +} + +static inline +struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp) +{ +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + struct net *net = nf_ct_net(ct); + + if (!net->ct.sysctl_tstamp) + return NULL; + + return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp); +#else + return NULL; +#endif +}; + +static inline bool nf_ct_tstamp_enabled(struct net *net) +{ + return net->ct.sysctl_tstamp != 0; +} + +static inline void nf_ct_set_tstamp(struct net *net, bool enable) +{ + net->ct.sysctl_tstamp = enable; +} + +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP +int nf_conntrack_tstamp_pernet_init(struct net *net); +void nf_conntrack_tstamp_pernet_fini(struct net *net); + +int nf_conntrack_tstamp_init(void); +void nf_conntrack_tstamp_fini(void); +#else +static inline int nf_conntrack_tstamp_pernet_init(struct net *net) +{ + return 0; +} + +static inline void nf_conntrack_tstamp_pernet_fini(struct net *net) +{ + return; +} + +static inline int nf_conntrack_tstamp_init(void) +{ + return 0; +} + +static inline void nf_conntrack_tstamp_fini(void) +{ + return; +} +#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */ + +#endif /* _NF_CONNTRACK_TSTAMP_H */ diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index e69ab2e8759..aea3f8221be 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -12,6 +12,7 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <linux/list_nulls.h> /* A `tuple' is a structure containing the information to uniquely identify a connection. ie. if two packets have the same tuple, they @@ -23,33 +24,8 @@ #define NF_CT_TUPLE_L3SIZE ARRAY_SIZE(((union nf_inet_addr *)NULL)->all) -/* The protocol-specific manipulable parts of the tuple: always in - network order! */ -union nf_conntrack_man_proto -{ - /* Add other protocols here. */ - __be16 all; - - struct { - __be16 port; - } tcp; - struct { - __be16 port; - } udp; - struct { - __be16 id; - } icmp; - struct { - __be16 port; - } sctp; - struct { - __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ - } gre; -}; - /* The manipulable part of the tuple. 
*/ -struct nf_conntrack_man -{ +struct nf_conntrack_man { union nf_inet_addr u3; union nf_conntrack_man_proto u; /* Layer 3 protocol */ @@ -57,8 +33,7 @@ struct nf_conntrack_man }; /* This contains the information to distinguish a connection. */ -struct nf_conntrack_tuple -{ +struct nf_conntrack_tuple { struct nf_conntrack_man src; /* These are the parts of the tuple which are fixed. */ @@ -79,6 +54,9 @@ struct nf_conntrack_tuple } icmp; struct { __be16 port; + } dccp; + struct { + __be16 port; } sctp; struct { __be16 key; @@ -93,109 +71,113 @@ struct nf_conntrack_tuple } dst; }; -struct nf_conntrack_tuple_mask -{ +struct nf_conntrack_tuple_mask { struct { union nf_inet_addr u3; union nf_conntrack_man_proto u; } src; }; -/* This is optimized opposed to a memset of the whole structure. Everything we - * really care about is the source/destination unions */ -#define NF_CT_TUPLE_U_BLANK(tuple) \ - do { \ - (tuple)->src.u.all = 0; \ - (tuple)->dst.u.all = 0; \ - memset(&(tuple)->src.u3, 0, sizeof((tuple)->src.u3)); \ - memset(&(tuple)->dst.u3, 0, sizeof((tuple)->dst.u3)); \ - } while (0) +static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t) +{ +#ifdef DEBUG + printk("tuple %p: %u %pI4:%hu -> %pI4:%hu\n", + t, t->dst.protonum, + &t->src.u3.ip, ntohs(t->src.u.all), + &t->dst.u3.ip, ntohs(t->dst.u.all)); +#endif +} -#ifdef __KERNEL__ +static inline void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t) +{ +#ifdef DEBUG + printk("tuple %p: %u %pI6 %hu -> %pI6 %hu\n", + t, t->dst.protonum, + t->src.u3.all, ntohs(t->src.u.all), + t->dst.u3.all, ntohs(t->dst.u.all)); +#endif +} -#define NF_CT_DUMP_TUPLE(tp) \ -pr_debug("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \ - (tp), (tp)->src.l3num, (tp)->dst.protonum, \ - NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \ - NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all)) +static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t) +{ + switch (t->src.l3num) { + case AF_INET: + nf_ct_dump_tuple_ip(t); + break; + case AF_INET6: + nf_ct_dump_tuple_ipv6(t); + break; + } +} /* If we're the first tuple, it's the original dir. 
*/ #define NF_CT_DIRECTION(h) \ ((enum ip_conntrack_dir)(h)->tuple.dst.dir) /* Connections have two entries in the hash table: one for each way */ -struct nf_conntrack_tuple_hash -{ - struct hlist_node hnode; +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; struct nf_conntrack_tuple tuple; }; -#endif /* __KERNEL__ */ - -static inline int __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1, - const struct nf_conntrack_tuple *t2) +static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1, + const struct nf_conntrack_tuple *t2) { - return (t1->src.u3.all[0] == t2->src.u3.all[0] && - t1->src.u3.all[1] == t2->src.u3.all[1] && - t1->src.u3.all[2] == t2->src.u3.all[2] && - t1->src.u3.all[3] == t2->src.u3.all[3] && + return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) && t1->src.u.all == t2->src.u.all && t1->src.l3num == t2->src.l3num); } -static inline int __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1, - const struct nf_conntrack_tuple *t2) +static inline bool __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1, + const struct nf_conntrack_tuple *t2) { - return (t1->dst.u3.all[0] == t2->dst.u3.all[0] && - t1->dst.u3.all[1] == t2->dst.u3.all[1] && - t1->dst.u3.all[2] == t2->dst.u3.all[2] && - t1->dst.u3.all[3] == t2->dst.u3.all[3] && + return (nf_inet_addr_cmp(&t1->dst.u3, &t2->dst.u3) && t1->dst.u.all == t2->dst.u.all && t1->dst.protonum == t2->dst.protonum); } -static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1, - const struct nf_conntrack_tuple *t2) +static inline bool nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1, + const struct nf_conntrack_tuple *t2) { return __nf_ct_tuple_src_equal(t1, t2) && __nf_ct_tuple_dst_equal(t1, t2); } -static inline int nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1, - const struct nf_conntrack_tuple_mask *m2) +static inline bool +nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1, + const struct nf_conntrack_tuple_mask *m2) { - return (m1->src.u3.all[0] == m2->src.u3.all[0] && - m1->src.u3.all[1] == m2->src.u3.all[1] && - m1->src.u3.all[2] == m2->src.u3.all[2] && - m1->src.u3.all[3] == m2->src.u3.all[3] && + return (nf_inet_addr_cmp(&m1->src.u3, &m2->src.u3) && m1->src.u.all == m2->src.u.all); } -static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1, - const struct nf_conntrack_tuple *t2, - const struct nf_conntrack_tuple_mask *mask) +static inline bool +nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1, + const struct nf_conntrack_tuple *t2, + const struct nf_conntrack_tuple_mask *mask) { int count; for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) { if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) & mask->src.u3.all[count]) - return 0; + return false; } if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all) - return 0; + return false; if (t1->src.l3num != t2->src.l3num || t1->dst.protonum != t2->dst.protonum) - return 0; + return false; - return 1; + return true; } -static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple_mask *mask) +static inline bool +nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_tuple_mask *mask) { return nf_ct_tuple_src_mask_cmp(t, tuple, mask) && __nf_ct_tuple_dst_equal(t, tuple); diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h new file mode 100644 
index 00000000000..034efe8d45a --- /dev/null +++ b/include/net/netfilter/nf_conntrack_zones.h @@ -0,0 +1,25 @@ +#ifndef _NF_CONNTRACK_ZONES_H +#define _NF_CONNTRACK_ZONES_H + +#define NF_CT_DEFAULT_ZONE 0 + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +#include <net/netfilter/nf_conntrack_extend.h> + +struct nf_conntrack_zone { + u16 id; +}; + +static inline u16 nf_ct_zone(const struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_ZONES + struct nf_conntrack_zone *nf_ct_zone; + nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE); + if (nf_ct_zone) + return nf_ct_zone->id; +#endif + return NF_CT_DEFAULT_ZONE; +} + +#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */ +#endif /* _NF_CONNTRACK_ZONES_H */ diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 8c6b5ae4553..99eac12d040 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -1,6 +1,8 @@ #ifndef _NF_LOG_H #define _NF_LOG_H +#include <linux/netfilter.h> + /* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will * disappear once iptables is replaced with pkttables. Please DO NOT use them * for any new code! */ @@ -28,7 +30,8 @@ struct nf_loginfo { } u; }; -typedef void nf_logfn(unsigned int pf, +typedef void nf_logfn(struct net *net, + u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, @@ -40,20 +43,30 @@ struct nf_logger { struct module *me; nf_logfn *logfn; char *name; + struct list_head list[NFPROTO_NUMPROTO]; }; /* Function to register/unregister log function. */ -int nf_log_register(int pf, const struct nf_logger *logger); -void nf_log_unregister(const struct nf_logger *logger); -void nf_log_unregister_pf(int pf); +int nf_log_register(u_int8_t pf, struct nf_logger *logger); +void nf_log_unregister(struct nf_logger *logger); + +void nf_log_set(struct net *net, u_int8_t pf, + const struct nf_logger *logger); +void nf_log_unset(struct net *net, const struct nf_logger *logger); + +int nf_log_bind_pf(struct net *net, u_int8_t pf, + const struct nf_logger *logger); +void nf_log_unbind_pf(struct net *net, u_int8_t pf); /* Calls the registered backend logging function */ -void nf_log_packet(int pf, +__printf(8, 9) +void nf_log_packet(struct net *net, + u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li, - const char *fmt, ...) 
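With nf_conntrack_zones.h in place a connection can carry an optional zone id, and nf_ct_zone() folds the "no extension" case into NF_CT_DEFAULT_ZONE. Lookups therefore have to qualify the tuple with the zone; a minimal sketch, assuming the usual tuplehash layout of struct nf_conn (the ct_key_equal() helper is made up for illustration):

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>

/* A candidate conntrack matches only if both the tuple and the zone agree. */
static bool ct_key_equal(const struct nf_conn *ct,
			 const struct nf_conntrack_tuple *tuple, u16 zone)
{
	return nf_ct_tuple_equal(tuple,
				 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
	       nf_ct_zone(ct) == zone;
}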
__attribute__ ((format(printf,7,8))); + const char *fmt, ...); #endif /* _NF_LOG_H */ diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index 9dc1039ff78..a71dd333ac6 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -1,97 +1,81 @@ #ifndef _NF_NAT_H #define _NF_NAT_H #include <linux/netfilter_ipv4.h> +#include <linux/netfilter/nf_nat.h> #include <net/netfilter/nf_conntrack_tuple.h> -#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16 - -enum nf_nat_manip_type -{ - IP_NAT_MANIP_SRC, - IP_NAT_MANIP_DST +enum nf_nat_manip_type { + NF_NAT_MANIP_SRC, + NF_NAT_MANIP_DST }; /* SRC manip occurs POST_ROUTING or LOCAL_IN */ #define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \ (hooknum) != NF_INET_LOCAL_IN) -#define IP_NAT_RANGE_MAP_IPS 1 -#define IP_NAT_RANGE_PROTO_SPECIFIED 2 -#define IP_NAT_RANGE_PROTO_RANDOM 4 - -/* NAT sequence number modifications */ -struct nf_nat_seq { - /* position of the last TCP sequence number modification (if any) */ - u_int32_t correction_pos; - - /* sequence number offset before and after last modification */ - int16_t offset_before, offset_after; -}; - -/* Single range specification. */ -struct nf_nat_range -{ - /* Set to OR of flags above. */ - unsigned int flags; - - /* Inclusive: network order. */ - __be32 min_ip, max_ip; - - /* Inclusive: network order */ - union nf_conntrack_man_proto min, max; -}; - -/* For backwards compat: don't use in modern code. */ -struct nf_nat_multi_range_compat -{ - unsigned int rangesize; /* Must be 1. */ - - /* hangs off end. */ - struct nf_nat_range range[1]; -}; - -#ifdef __KERNEL__ #include <linux/list.h> #include <linux/netfilter/nf_conntrack_pptp.h> #include <net/netfilter/nf_conntrack_extend.h> /* per conntrack: nat application helper private data */ -union nf_conntrack_nat_help -{ +union nf_conntrack_nat_help { /* insert nat helper private data here */ +#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE) struct nf_nat_pptp nat_pptp_info; +#endif }; struct nf_conn; /* The structure embedded in the conntrack structure. */ -struct nf_conn_nat -{ +struct nf_conn_nat { struct hlist_node bysource; - struct nf_nat_seq seq[IP_CT_DIR_MAX]; struct nf_conn *ct; union nf_conntrack_nat_help help; #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) + defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \ + defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \ + defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE) int masq_index; #endif }; /* Set up the info structure to map into this range. */ -extern unsigned int nf_nat_setup_info(struct nf_conn *ct, - const struct nf_nat_range *range, - enum nf_nat_manip_type maniptype); +unsigned int nf_nat_setup_info(struct nf_conn *ct, + const struct nf_nat_range *range, + enum nf_nat_manip_type maniptype); + +extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct, + unsigned int hooknum); + +struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct); /* Is this tuple already taken? 
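nf_log_packet() now takes the net namespace and a u_int8_t protocol family up front, and the format-string check moves into the __printf(8, 9) annotation on the declaration itself. A hedged sketch of a caller under the new signature (the log_and_drop() wrapper is hypothetical):

#include <linux/netfilter.h>
#include <net/netfilter/nf_log.h>

static unsigned int log_and_drop(struct net *net, u8 pf, unsigned int hooknum,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out)
{
	/* a NULL loginfo selects the bound logger's defaults */
	nf_log_packet(net, pf, hooknum, skb, in, out, NULL,
		      "dropped on %s: ", in ? in->name : "?");
	return NF_DROP;
}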
(not by us)*/ -extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, - const struct nf_conn *ignored_conntrack); +int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, + const struct nf_conn *ignored_conntrack); static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct) { +#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) return nf_ct_ext_find(ct, NF_CT_EXT_NAT); +#else + return NULL; +#endif +} + +static inline bool nf_nat_oif_changed(unsigned int hooknum, + enum ip_conntrack_info ctinfo, + struct nf_conn_nat *nat, + const struct net_device *out) +{ +#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \ + IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE) + return nat->masq_index && hooknum == NF_INET_POST_ROUTING && + CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL && + nat->masq_index != out->ifindex; +#else + return false; +#endif } -#else /* !__KERNEL__: iptables wants this to compile. */ -#define nf_nat_multi_range nf_nat_multi_range_compat -#endif /*__KERNEL__*/ #endif diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h index f29eeb9777e..fbfd1ba4254 100644 --- a/include/net/netfilter/nf_nat_core.h +++ b/include/net/netfilter/nf_nat_core.h @@ -7,22 +7,25 @@ /* This header used to share core functionality between the standalone NAT module, and the compatibility layer's use of NAT for masquerading. */ -extern unsigned int nf_nat_packet(struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff *skb); +unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int hooknum, struct sk_buff *skb); -extern int nf_nat_icmp_reply_translation(struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff *skb); +int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family); static inline int nf_nat_initialized(struct nf_conn *ct, enum nf_nat_manip_type manip) { - if (manip == IP_NAT_MANIP_SRC) - return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); + if (manip == NF_NAT_MANIP_SRC) + return ct->status & IPS_SRC_NAT_DONE; else - return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status); + return ct->status & IPS_DST_NAT_DONE; } + +struct nlattr; + +extern int +(*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, + enum nf_nat_manip_type manip, + const struct nlattr *attr); + #endif /* _NF_NAT_CORE_H */ diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 58dd2268794..01bcc6bfbcc 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -7,26 +7,34 @@ struct sk_buff; /* These return true or false. 
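nf_nat_initialized() now reads the status bits directly, so the usual pattern in the NAT hooks stays cheap: if no mapping has been set up for the manip type implied by the hook, fall back to a null binding. A minimal sketch using only helpers declared above (the maybe_null_bind() name is illustrative):

static unsigned int maybe_null_bind(struct nf_conn *ct, unsigned int hooknum)
{
	/* first packet in this direction and no explicit rule: keep tuple as-is */
	if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
		return nf_nat_alloc_null_binding(ct, hooknum);
	return NF_ACCEPT;
}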
*/ -extern int nf_nat_mangle_tcp_packet(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len); -extern int nf_nat_mangle_udp_packet(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len); -extern int nf_nat_seq_adjust(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo); +int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned int match_offset, + unsigned int match_len, const char *rep_buffer, + unsigned int rep_len, bool adjust); + +static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned int match_offset, + unsigned int match_len, + const char *rep_buffer, + unsigned int rep_len) +{ + return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, + match_offset, match_len, + rep_buffer, rep_len, true); +} + +int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned int match_offset, + unsigned int match_len, const char *rep_buffer, + unsigned int rep_len); /* Setup NAT on this expected conntrack so it follows master, but goes * to port ct->master->saved_proto. */ -extern void nf_nat_follow_master(struct nf_conn *ct, - struct nf_conntrack_expect *this); +void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); + #endif diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h new file mode 100644 index 00000000000..5a2919b2e09 --- /dev/null +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -0,0 +1,49 @@ +#ifndef _NF_NAT_L3PROTO_H +#define _NF_NAT_L3PROTO_H + +struct nf_nat_l4proto; +struct nf_nat_l3proto { + u8 l3proto; + + bool (*in_range)(const struct nf_conntrack_tuple *t, + const struct nf_nat_range *range); + + u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16); + + bool (*manip_pkt)(struct sk_buff *skb, + unsigned int iphdroff, + const struct nf_nat_l4proto *l4proto, + const struct nf_conntrack_tuple *target, + enum nf_nat_manip_type maniptype); + + void (*csum_update)(struct sk_buff *skb, unsigned int iphdroff, + __sum16 *check, + const struct nf_conntrack_tuple *t, + enum nf_nat_manip_type maniptype); + + void (*csum_recalc)(struct sk_buff *skb, u8 proto, + void *data, __sum16 *check, + int datalen, int oldlen); + + void (*decode_session)(struct sk_buff *skb, + const struct nf_conn *ct, + enum ip_conntrack_dir dir, + unsigned long statusbit, + struct flowi *fl); + + int (*nlattr_to_range)(struct nlattr *tb[], + struct nf_nat_range *range); +}; + +int nf_nat_l3proto_register(const struct nf_nat_l3proto *); +void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *); +const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto); + +int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int hooknum); +int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int hooknum, unsigned int hdrlen); + +#endif /* _NF_NAT_L3PROTO_H */ diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h new file mode 100644 index 
00000000000..12f4cc841b6 --- /dev/null +++ b/include/net/netfilter/nf_nat_l4proto.h @@ -0,0 +1,72 @@ +/* Header for use in defining a given protocol. */ +#ifndef _NF_NAT_L4PROTO_H +#define _NF_NAT_L4PROTO_H +#include <net/netfilter/nf_nat.h> +#include <linux/netfilter/nfnetlink_conntrack.h> + +struct nf_nat_range; +struct nf_nat_l3proto; + +struct nf_nat_l4proto { + /* Protocol number. */ + u8 l4proto; + + /* Translate a packet to the target according to manip type. + * Return true if succeeded. + */ + bool (*manip_pkt)(struct sk_buff *skb, + const struct nf_nat_l3proto *l3proto, + unsigned int iphdroff, unsigned int hdroff, + const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype); + + /* Is the manipable part of the tuple between min and max incl? */ + bool (*in_range)(const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype, + const union nf_conntrack_man_proto *min, + const union nf_conntrack_man_proto *max); + + /* Alter the per-proto part of the tuple (depending on + * maniptype), to give a unique tuple in the given range if + * possible. Per-protocol part of tuple is initialized to the + * incoming packet. + */ + void (*unique_tuple)(const struct nf_nat_l3proto *l3proto, + struct nf_conntrack_tuple *tuple, + const struct nf_nat_range *range, + enum nf_nat_manip_type maniptype, + const struct nf_conn *ct); + + int (*nlattr_to_range)(struct nlattr *tb[], + struct nf_nat_range *range); +}; + +/* Protocol registration. */ +int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto); +void nf_nat_l4proto_unregister(u8 l3proto, + const struct nf_nat_l4proto *l4proto); + +const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto); + +/* Built-in protocols. */ +extern const struct nf_nat_l4proto nf_nat_l4proto_tcp; +extern const struct nf_nat_l4proto nf_nat_l4proto_udp; +extern const struct nf_nat_l4proto nf_nat_l4proto_icmp; +extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6; +extern const struct nf_nat_l4proto nf_nat_l4proto_unknown; + +bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype, + const union nf_conntrack_man_proto *min, + const union nf_conntrack_man_proto *max); + +void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, + struct nf_conntrack_tuple *tuple, + const struct nf_nat_range *range, + enum nf_nat_manip_type maniptype, + const struct nf_conn *ct, u16 *rover); + +int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], + struct nf_nat_range *range); + +#endif /*_NF_NAT_L4PROTO_H*/ diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h deleted file mode 100644 index 4aa0edbb5b9..00000000000 --- a/include/net/netfilter/nf_nat_protocol.h +++ /dev/null @@ -1,70 +0,0 @@ -/* Header for use in defining a given protocol. */ -#ifndef _NF_NAT_PROTOCOL_H -#define _NF_NAT_PROTOCOL_H -#include <net/netfilter/nf_nat.h> -#include <linux/netfilter/nfnetlink_conntrack.h> - -struct nf_nat_range; - -struct nf_nat_protocol -{ - /* Protocol name */ - const char *name; - - /* Protocol number. */ - unsigned int protonum; - - struct module *me; - - /* Translate a packet to the target according to manip type. - Return true if succeeded. */ - int (*manip_pkt)(struct sk_buff *skb, - unsigned int iphdroff, - const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype); - - /* Is the manipable part of the tuple between min and max incl? 
*/ - int (*in_range)(const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype, - const union nf_conntrack_man_proto *min, - const union nf_conntrack_man_proto *max); - - /* Alter the per-proto part of the tuple (depending on - maniptype), to give a unique tuple in the given range if - possible; return false if not. Per-protocol part of tuple - is initialized to the incoming packet. */ - int (*unique_tuple)(struct nf_conntrack_tuple *tuple, - const struct nf_nat_range *range, - enum nf_nat_manip_type maniptype, - const struct nf_conn *ct); - - int (*range_to_nlattr)(struct sk_buff *skb, - const struct nf_nat_range *range); - - int (*nlattr_to_range)(struct nlattr *tb[], - struct nf_nat_range *range); -}; - -/* Protocol registration. */ -extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto); -extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto); - -extern const struct nf_nat_protocol *nf_nat_proto_find_get(u_int8_t protocol); -extern void nf_nat_proto_put(const struct nf_nat_protocol *proto); - -/* Built-in protocols. */ -extern const struct nf_nat_protocol nf_nat_protocol_tcp; -extern const struct nf_nat_protocol nf_nat_protocol_udp; -extern const struct nf_nat_protocol nf_nat_protocol_icmp; -extern const struct nf_nat_protocol nf_nat_unknown_protocol; - -extern int init_protocols(void) __init; -extern void cleanup_protocols(void); -extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum); - -extern int nf_nat_port_range_to_nlattr(struct sk_buff *skb, - const struct nf_nat_range *range); -extern int nf_nat_port_nlattr_to_range(struct nlattr *tb[], - struct nf_nat_range *range); - -#endif /*_NF_NAT_PROTO_H*/ diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h deleted file mode 100644 index 75d1825031d..00000000000 --- a/include/net/netfilter/nf_nat_rule.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _NF_NAT_RULE_H -#define _NF_NAT_RULE_H -#include <net/netfilter/nf_conntrack.h> -#include <net/netfilter/nf_nat.h> -#include <linux/netfilter_ipv4/ip_tables.h> - -extern int nf_nat_rule_init(void) __init; -extern void nf_nat_rule_cleanup(void); -extern int nf_nat_rule_find(struct sk_buff *skb, - unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct nf_conn *ct); - -extern unsigned int -alloc_null_binding(struct nf_conn *ct, unsigned int hooknum); - -extern unsigned int -alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum); -#endif /* _NF_NAT_RULE_H */ diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index d030044e923..84a53d78030 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -1,6 +1,10 @@ #ifndef _NF_QUEUE_H #define _NF_QUEUE_H +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/jhash.h> + /* Each queued (to userspace) skbuff has one of these. 
*/ struct nf_queue_entry { struct list_head list; @@ -8,11 +12,14 @@ struct nf_queue_entry { unsigned int id; struct nf_hook_ops *elem; - int pf; + u_int8_t pf; + u16 size; /* sizeof(entry) + saved route keys */ unsigned int hook; struct net_device *indev; struct net_device *outdev; int (*okfn)(struct sk_buff *); + + /* extra space to store route keys */ }; #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) @@ -21,14 +28,71 @@ struct nf_queue_entry { struct nf_queue_handler { int (*outfn)(struct nf_queue_entry *entry, unsigned int queuenum); - char *name; }; -extern int nf_register_queue_handler(int pf, - const struct nf_queue_handler *qh); -extern int nf_unregister_queue_handler(int pf, - const struct nf_queue_handler *qh); -extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); -extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); +void nf_register_queue_handler(const struct nf_queue_handler *qh); +void nf_unregister_queue_handler(void); +void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); + +bool nf_queue_entry_get_refs(struct nf_queue_entry *entry); +void nf_queue_entry_release_refs(struct nf_queue_entry *entry); + +static inline void init_hashrandom(u32 *jhash_initval) +{ + while (*jhash_initval == 0) + *jhash_initval = prandom_u32(); +} + +static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval) +{ + const struct iphdr *iph = ip_hdr(skb); + + /* packets in either direction go into same queue */ + if ((__force u32)iph->saddr < (__force u32)iph->daddr) + return jhash_3words((__force u32)iph->saddr, + (__force u32)iph->daddr, iph->protocol, jhash_initval); + + return jhash_3words((__force u32)iph->daddr, + (__force u32)iph->saddr, iph->protocol, jhash_initval); +} + +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + u32 a, b, c; + + if ((__force u32)ip6h->saddr.s6_addr32[3] < + (__force u32)ip6h->daddr.s6_addr32[3]) { + a = (__force u32) ip6h->saddr.s6_addr32[3]; + b = (__force u32) ip6h->daddr.s6_addr32[3]; + } else { + b = (__force u32) ip6h->saddr.s6_addr32[3]; + a = (__force u32) ip6h->daddr.s6_addr32[3]; + } + + if ((__force u32)ip6h->saddr.s6_addr32[1] < + (__force u32)ip6h->daddr.s6_addr32[1]) + c = (__force u32) ip6h->saddr.s6_addr32[1]; + else + c = (__force u32) ip6h->daddr.s6_addr32[1]; + + return jhash_3words(a, b, c, jhash_initval); +} +#endif + +static inline u32 +nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family, + u32 jhash_initval) +{ + if (family == NFPROTO_IPV4) + queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32; +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) + else if (family == NFPROTO_IPV6) + queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32; +#endif + + return queue; +} #endif /* _NF_QUEUE_H */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h new file mode 100644 index 00000000000..c4d86198d3d --- /dev/null +++ b/include/net/netfilter/nf_tables.h @@ -0,0 +1,655 @@ +#ifndef _NET_NF_TABLES_H +#define _NET_NF_TABLES_H + +#include <linux/list.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nfnetlink.h> +#include <linux/netfilter/x_tables.h> +#include <linux/netfilter/nf_tables.h> +#include <linux/u64_stats_sync.h> +#include <net/netlink.h> + +#define NFT_JUMP_STACK_SIZE 16 + +struct nft_pktinfo { + struct sk_buff *skb; + const struct net_device *in; + 
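The point of nfqueue_hash() is queue balancing without a modulo: the 32-bit flow hash is scaled by ((u64)hash * queues_total) >> 32, which lands uniformly in [0, queues_total) and is then added to the base queue number; because hash_v4()/hash_v6() order the addresses before hashing, both directions of a flow stay on the same queue. A hedged sketch of how an NFQUEUE-style target might use it (the flow_queue() helper is made up; only the inlines above come from this header):

static u32 jhash_initval __read_mostly;

static u32 flow_queue(const struct sk_buff *skb, u8 family,
		      u16 queue_base, u16 queues_total)
{
	if (queues_total <= 1)
		return queue_base;

	init_hashrandom(&jhash_initval);	/* lazily seeded on first use */
	return nfqueue_hash(skb, queue_base, queues_total, family,
			    jhash_initval);
}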
const struct net_device *out; + const struct nf_hook_ops *ops; + u8 nhoff; + u8 thoff; + u8 tprot; + /* for x_tables compatibility */ + struct xt_action_param xt; +}; + +static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out) +{ + pkt->skb = skb; + pkt->in = pkt->xt.in = in; + pkt->out = pkt->xt.out = out; + pkt->ops = ops; + pkt->xt.hooknum = ops->hooknum; + pkt->xt.family = ops->pf; +} + +struct nft_data { + union { + u32 data[4]; + struct { + u32 verdict; + struct nft_chain *chain; + }; + }; +} __attribute__((aligned(__alignof__(u64)))); + +static inline int nft_data_cmp(const struct nft_data *d1, + const struct nft_data *d2, + unsigned int len) +{ + return memcmp(d1->data, d2->data, len); +} + +static inline void nft_data_copy(struct nft_data *dst, + const struct nft_data *src) +{ + BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64)); + *(u64 *)&dst->data[0] = *(u64 *)&src->data[0]; + *(u64 *)&dst->data[2] = *(u64 *)&src->data[2]; +} + +static inline void nft_data_debug(const struct nft_data *data) +{ + pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n", + data->data[0], data->data[1], + data->data[2], data->data[3]); +} + +/** + * struct nft_ctx - nf_tables rule/set context + * + * @net: net namespace + * @afi: address family info + * @table: the table the chain is contained in + * @chain: the chain the rule is contained in + * @nla: netlink attributes + * @portid: netlink portID of the original message + * @seq: netlink sequence number + * @report: notify via unicast netlink message + */ +struct nft_ctx { + struct net *net; + struct nft_af_info *afi; + struct nft_table *table; + struct nft_chain *chain; + const struct nlattr * const *nla; + u32 portid; + u32 seq; + bool report; +}; + +struct nft_data_desc { + enum nft_data_types type; + unsigned int len; +}; + +int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, + struct nft_data_desc *desc, const struct nlattr *nla); +void nft_data_uninit(const struct nft_data *data, enum nft_data_types type); +int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, + enum nft_data_types type, unsigned int len); + +static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg) +{ + return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE; +} + +static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) +{ + return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1; +} + +int nft_validate_input_register(enum nft_registers reg); +int nft_validate_output_register(enum nft_registers reg); +int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, + const struct nft_data *data, + enum nft_data_types type); + +/** + * struct nft_set_elem - generic representation of set elements + * + * @cookie: implementation specific element cookie + * @key: element key + * @data: element data (maps only) + * @flags: element flags (end of interval) + * + * The cookie can be used to store a handle to the element for subsequent + * removal. 
+ */ +struct nft_set_elem { + void *cookie; + struct nft_data key; + struct nft_data data; + u32 flags; +}; + +struct nft_set; +struct nft_set_iter { + unsigned int count; + unsigned int skip; + int err; + int (*fn)(const struct nft_ctx *ctx, + const struct nft_set *set, + const struct nft_set_iter *iter, + const struct nft_set_elem *elem); +}; + +/** + * struct nft_set_desc - description of set elements + * + * @klen: key length + * @dlen: data length + * @size: number of set elements + */ +struct nft_set_desc { + unsigned int klen; + unsigned int dlen; + unsigned int size; +}; + +/** + * enum nft_set_class - performance class + * + * @NFT_LOOKUP_O_1: constant, O(1) + * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N) + * @NFT_LOOKUP_O_N: linear, O(N) + */ +enum nft_set_class { + NFT_SET_CLASS_O_1, + NFT_SET_CLASS_O_LOG_N, + NFT_SET_CLASS_O_N, +}; + +/** + * struct nft_set_estimate - estimation of memory and performance + * characteristics + * + * @size: required memory + * @class: lookup performance class + */ +struct nft_set_estimate { + unsigned int size; + enum nft_set_class class; +}; + +/** + * struct nft_set_ops - nf_tables set operations + * + * @lookup: look up an element within the set + * @insert: insert new element into set + * @remove: remove element from set + * @walk: iterate over all set elemeennts + * @privsize: function to return size of set private data + * @init: initialize private data of new set instance + * @destroy: destroy private data of set instance + * @list: nf_tables_set_ops list node + * @owner: module reference + * @features: features supported by the implementation + */ +struct nft_set_ops { + bool (*lookup)(const struct nft_set *set, + const struct nft_data *key, + struct nft_data *data); + int (*get)(const struct nft_set *set, + struct nft_set_elem *elem); + int (*insert)(const struct nft_set *set, + const struct nft_set_elem *elem); + void (*remove)(const struct nft_set *set, + const struct nft_set_elem *elem); + void (*walk)(const struct nft_ctx *ctx, + const struct nft_set *set, + struct nft_set_iter *iter); + + unsigned int (*privsize)(const struct nlattr * const nla[]); + bool (*estimate)(const struct nft_set_desc *desc, + u32 features, + struct nft_set_estimate *est); + int (*init)(const struct nft_set *set, + const struct nft_set_desc *desc, + const struct nlattr * const nla[]); + void (*destroy)(const struct nft_set *set); + + struct list_head list; + struct module *owner; + u32 features; +}; + +int nft_register_set(struct nft_set_ops *ops); +void nft_unregister_set(struct nft_set_ops *ops); + +/** + * struct nft_set - nf_tables set instance + * + * @list: table set list node + * @bindings: list of set bindings + * @name: name of the set + * @ktype: key type (numeric type defined by userspace, not used in the kernel) + * @dtype: data type (verdict or numeric type defined by userspace) + * @size: maximum set size + * @nelems: number of elements + * @ops: set ops + * @flags: set flags + * @klen: key length + * @dlen: data length + * @data: private set data + */ +struct nft_set { + struct list_head list; + struct list_head bindings; + char name[IFNAMSIZ]; + u32 ktype; + u32 dtype; + u32 size; + u32 nelems; + /* runtime data below here */ + const struct nft_set_ops *ops ____cacheline_aligned; + u16 flags; + u8 klen; + u8 dlen; + unsigned char data[] + __attribute__((aligned(__alignof__(u64)))); +}; + +static inline void *nft_set_priv(const struct nft_set *set) +{ + return (void *)set->data; +} + +struct nft_set *nf_tables_set_lookup(const struct nft_table 
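A set backend keeps its state directly behind struct nft_set: ->privsize() tells the core how much room to allocate and nft_set_priv() hands that area back in every operation. A minimal sketch of the pattern with a hypothetical rbtree-backed implementation (the my_set_* names and the priv layout are assumptions; only the ops signatures come from this header):

#include <linux/rbtree.h>

struct my_set_priv {
	struct rb_root root;		/* backend-specific element storage */
};

static unsigned int my_set_privsize(const struct nlattr * const nla[])
{
	return sizeof(struct my_set_priv);
}

static bool my_set_lookup(const struct nft_set *set,
			  const struct nft_data *key, struct nft_data *data)
{
	struct my_set_priv *priv = nft_set_priv(set);

	/* ... walk priv->root for key; on a match, copy the bound data ... */
	return false;
}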
*table, + const struct nlattr *nla); +struct nft_set *nf_tables_set_lookup_byid(const struct net *net, + const struct nlattr *nla); + +/** + * struct nft_set_binding - nf_tables set binding + * + * @list: set bindings list node + * @chain: chain containing the rule bound to the set + * + * A set binding contains all information necessary for validation + * of new elements added to a bound set. + */ +struct nft_set_binding { + struct list_head list; + const struct nft_chain *chain; +}; + +int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding); +void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding); + + +/** + * struct nft_expr_type - nf_tables expression type + * + * @select_ops: function to select nft_expr_ops + * @ops: default ops, used when no select_ops functions is present + * @list: used internally + * @name: Identifier + * @owner: module reference + * @policy: netlink attribute policy + * @maxattr: highest netlink attribute number + * @family: address family for AF-specific types + */ +struct nft_expr_type { + const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *, + const struct nlattr * const tb[]); + const struct nft_expr_ops *ops; + struct list_head list; + const char *name; + struct module *owner; + const struct nla_policy *policy; + unsigned int maxattr; + u8 family; +}; + +/** + * struct nft_expr_ops - nf_tables expression operations + * + * @eval: Expression evaluation function + * @size: full expression size, including private data size + * @init: initialization function + * @destroy: destruction function + * @dump: function to dump parameters + * @type: expression type + * @validate: validate expression, called during loop detection + * @data: extra data to attach to this expression operation + */ +struct nft_expr; +struct nft_expr_ops { + void (*eval)(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt); + unsigned int size; + + int (*init)(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + void (*destroy)(const struct nft_ctx *ctx, + const struct nft_expr *expr); + int (*dump)(struct sk_buff *skb, + const struct nft_expr *expr); + int (*validate)(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data); + const struct nft_expr_type *type; + void *data; +}; + +#define NFT_EXPR_MAXATTR 16 +#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \ + ALIGN(size, __alignof__(struct nft_expr))) + +/** + * struct nft_expr - nf_tables expression + * + * @ops: expression ops + * @data: expression private data + */ +struct nft_expr { + const struct nft_expr_ops *ops; + unsigned char data[]; +}; + +static inline void *nft_expr_priv(const struct nft_expr *expr) +{ + return (void *)expr->data; +} + +/** + * struct nft_rule - nf_tables rule + * + * @list: used internally + * @handle: rule handle + * @genmask: generation mask + * @dlen: length of expression data + * @ulen: length of user data (used for comments) + * @data: expression data + */ +struct nft_rule { + struct list_head list; + u64 handle:42, + genmask:2, + dlen:12, + ulen:8; + unsigned char data[] + __attribute__((aligned(__alignof__(struct nft_expr)))); +}; + +/** + * struct nft_trans - nf_tables object update in transaction + * + * @rcu_head: rcu head to defer release of transaction data + * @list: used internally + * @msg_type: message type + * @ctx: transaction context + * @data: 
internal information related to the transaction + */ +struct nft_trans { + struct rcu_head rcu_head; + struct list_head list; + int msg_type; + struct nft_ctx ctx; + char data[0]; +}; + +struct nft_trans_rule { + struct nft_rule *rule; +}; + +#define nft_trans_rule(trans) \ + (((struct nft_trans_rule *)trans->data)->rule) + +struct nft_trans_set { + struct nft_set *set; + u32 set_id; +}; + +#define nft_trans_set(trans) \ + (((struct nft_trans_set *)trans->data)->set) +#define nft_trans_set_id(trans) \ + (((struct nft_trans_set *)trans->data)->set_id) + +struct nft_trans_chain { + bool update; + char name[NFT_CHAIN_MAXNAMELEN]; + struct nft_stats __percpu *stats; + u8 policy; +}; + +#define nft_trans_chain_update(trans) \ + (((struct nft_trans_chain *)trans->data)->update) +#define nft_trans_chain_name(trans) \ + (((struct nft_trans_chain *)trans->data)->name) +#define nft_trans_chain_stats(trans) \ + (((struct nft_trans_chain *)trans->data)->stats) +#define nft_trans_chain_policy(trans) \ + (((struct nft_trans_chain *)trans->data)->policy) + +struct nft_trans_table { + bool update; + bool enable; +}; + +#define nft_trans_table_update(trans) \ + (((struct nft_trans_table *)trans->data)->update) +#define nft_trans_table_enable(trans) \ + (((struct nft_trans_table *)trans->data)->enable) + +struct nft_trans_elem { + struct nft_set *set; + struct nft_set_elem elem; +}; + +#define nft_trans_elem_set(trans) \ + (((struct nft_trans_elem *)trans->data)->set) +#define nft_trans_elem(trans) \ + (((struct nft_trans_elem *)trans->data)->elem) + +static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule) +{ + return (struct nft_expr *)&rule->data[0]; +} + +static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr) +{ + return ((void *)expr) + expr->ops->size; +} + +static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) +{ + return (struct nft_expr *)&rule->data[rule->dlen]; +} + +static inline void *nft_userdata(const struct nft_rule *rule) +{ + return (void *)&rule->data[rule->dlen]; +} + +/* + * The last pointer isn't really necessary, but the compiler isn't able to + * determine that the result of nft_expr_last() is always the same since it + * can't assume that the dlen value wasn't changed within calls in the loop. 
+ */ +#define nft_rule_for_each_expr(expr, last, rule) \ + for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \ + (expr) != (last); \ + (expr) = nft_expr_next(expr)) + +enum nft_chain_flags { + NFT_BASE_CHAIN = 0x1, + NFT_CHAIN_INACTIVE = 0x2, +}; + +/** + * struct nft_chain - nf_tables chain + * + * @rules: list of rules in the chain + * @list: used internally + * @net: net namespace that this chain belongs to + * @table: table that this chain belongs to + * @handle: chain handle + * @use: number of jump references to this chain + * @level: length of longest path to this chain + * @flags: bitmask of enum nft_chain_flags + * @name: name of the chain + */ +struct nft_chain { + struct list_head rules; + struct list_head list; + struct net *net; + struct nft_table *table; + u64 handle; + u32 use; + u16 level; + u8 flags; + char name[NFT_CHAIN_MAXNAMELEN]; +}; + +enum nft_chain_type { + NFT_CHAIN_T_DEFAULT = 0, + NFT_CHAIN_T_ROUTE, + NFT_CHAIN_T_NAT, + NFT_CHAIN_T_MAX +}; + +struct nft_stats { + u64 bytes; + u64 pkts; + struct u64_stats_sync syncp; +}; + +#define NFT_HOOK_OPS_MAX 2 + +/** + * struct nft_base_chain - nf_tables base chain + * + * @ops: netfilter hook ops + * @type: chain type + * @policy: default policy + * @stats: per-cpu chain stats + * @chain: the chain + */ +struct nft_base_chain { + struct nf_hook_ops ops[NFT_HOOK_OPS_MAX]; + const struct nf_chain_type *type; + u8 policy; + struct nft_stats __percpu *stats; + struct nft_chain chain; +}; + +static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain) +{ + return container_of(chain, struct nft_base_chain, chain); +} + +unsigned int nft_do_chain(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops); + +/** + * struct nft_table - nf_tables table + * + * @list: used internally + * @chains: chains in the table + * @sets: sets in the table + * @hgenerator: handle generator state + * @use: number of chain references to this table + * @flags: table flag (see enum nft_table_flags) + * @name: name of the table + */ +struct nft_table { + struct list_head list; + struct list_head chains; + struct list_head sets; + u64 hgenerator; + u32 use; + u16 flags; + char name[]; +}; + +/** + * struct nft_af_info - nf_tables address family info + * + * @list: used internally + * @family: address family + * @nhooks: number of hooks in this family + * @owner: module owner + * @tables: used internally + * @nops: number of hook ops in this family + * @hook_ops_init: initialization function for chain hook ops + * @hooks: hookfn overrides for packet validation + */ +struct nft_af_info { + struct list_head list; + int family; + unsigned int nhooks; + struct module *owner; + struct list_head tables; + unsigned int nops; + void (*hook_ops_init)(struct nf_hook_ops *, + unsigned int); + nf_hookfn *hooks[NF_MAX_HOOKS]; +}; + +int nft_register_afinfo(struct net *, struct nft_af_info *); +void nft_unregister_afinfo(struct nft_af_info *); + +/** + * struct nf_chain_type - nf_tables chain type info + * + * @name: name of the type + * @type: numeric identifier + * @family: address family + * @owner: module owner + * @hook_mask: mask of valid hooks + * @hooks: hookfn overrides + */ +struct nf_chain_type { + const char *name; + enum nft_chain_type type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[NF_MAX_HOOKS]; +}; + +int nft_register_chain_type(const struct nf_chain_type *); +void nft_unregister_chain_type(const struct nf_chain_type *); + +int nft_register_expr(struct nft_expr_type *); 
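nft_rule_for_each_expr() is the walk used on the packet path: expressions sit back to back in rule->data and dlen marks the end, so iteration is just pointer arithmetic over expr->ops->size. A hedged sketch of an evaluation loop in the spirit of nft_do_chain(), with verdict handling reduced to a bail-out (the eval_rule() helper is illustrative):

static void eval_rule(const struct nft_rule *rule, struct nft_pktinfo *pkt,
		      struct nft_data *regs)
{
	const struct nft_expr *expr, *last;

	nft_rule_for_each_expr(expr, last, rule) {
		expr->ops->eval(expr, regs, pkt);
		if (regs[NFT_REG_VERDICT].verdict != NFT_CONTINUE)
			break;	/* an expression issued a verdict */
	}
}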
+void nft_unregister_expr(struct nft_expr_type *); + +#define nft_dereference(p) \ + nfnl_dereference(p, NFNL_SUBSYS_NFTABLES) + +#define MODULE_ALIAS_NFT_FAMILY(family) \ + MODULE_ALIAS("nft-afinfo-" __stringify(family)) + +#define MODULE_ALIAS_NFT_CHAIN(family, name) \ + MODULE_ALIAS("nft-chain-" __stringify(family) "-" name) + +#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \ + MODULE_ALIAS("nft-expr-" __stringify(family) "-" name) + +#define MODULE_ALIAS_NFT_EXPR(name) \ + MODULE_ALIAS("nft-expr-" name) + +#define MODULE_ALIAS_NFT_SET() \ + MODULE_ALIAS("nft-set") + +#endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h new file mode 100644 index 00000000000..a75fc8e27cd --- /dev/null +++ b/include/net/netfilter/nf_tables_core.h @@ -0,0 +1,52 @@ +#ifndef _NET_NF_TABLES_CORE_H +#define _NET_NF_TABLES_CORE_H + +int nf_tables_core_module_init(void); +void nf_tables_core_module_exit(void); + +int nft_immediate_module_init(void); +void nft_immediate_module_exit(void); + +struct nft_cmp_fast_expr { + u32 data; + enum nft_registers sreg:8; + u8 len; +}; + +/* Calculate the mask for the nft_cmp_fast expression. On big endian the + * mask needs to include the *upper* bytes when interpreting that data as + * something smaller than the full u32, therefore a cpu_to_le32 is done. + */ +static inline u32 nft_cmp_fast_mask(unsigned int len) +{ + return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr, + data) * BITS_PER_BYTE - len)); +} + +extern const struct nft_expr_ops nft_cmp_fast_ops; + +int nft_cmp_module_init(void); +void nft_cmp_module_exit(void); + +int nft_lookup_module_init(void); +void nft_lookup_module_exit(void); + +int nft_bitwise_module_init(void); +void nft_bitwise_module_exit(void); + +int nft_byteorder_module_init(void); +void nft_byteorder_module_exit(void); + +struct nft_payload { + enum nft_payload_bases base:8; + u8 offset; + u8 len; + enum nft_registers dreg:8; +}; + +extern const struct nft_expr_ops nft_payload_fast_ops; + +int nft_payload_module_init(void); +void nft_payload_module_exit(void); + +#endif /* _NET_NF_TABLES_CORE_H */ diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h new file mode 100644 index 00000000000..cba143fbd2e --- /dev/null +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -0,0 +1,26 @@ +#ifndef _NF_TABLES_IPV4_H_ +#define _NF_TABLES_IPV4_H_ + +#include <net/netfilter/nf_tables.h> +#include <net/ip.h> + +static inline void +nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out) +{ + struct iphdr *ip; + + nft_set_pktinfo(pkt, ops, skb, in, out); + + ip = ip_hdr(pkt->skb); + pkt->tprot = ip->protocol; + pkt->xt.thoff = ip_hdrlen(pkt->skb); + pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; +} + +extern struct nft_af_info nft_af_ipv4; + +#endif diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h new file mode 100644 index 00000000000..74d97613765 --- /dev/null +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -0,0 +1,33 @@ +#ifndef _NF_TABLES_IPV6_H_ +#define _NF_TABLES_IPV6_H_ + +#include <linux/netfilter_ipv6/ip6_tables.h> +#include <net/ipv6.h> + +static inline int +nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out) +{ + int protohdr, thoff = 0; + unsigned short frag_off; + + 
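nft_cmp_fast_mask() exists so a compare on fewer than 32 bits can stay a single masked load: for len = 16 it evaluates to 0x0000ffff on little endian, and the cpu_to_le32() keeps the covered low-address bytes selected on big endian as well. A minimal sketch of the fast-path check it enables (the cmp_fast_match() helper is illustrative; priv->data is assumed to have been stored pre-masked at init time):

static bool cmp_fast_match(const struct nft_cmp_fast_expr *priv,
			   const struct nft_data *regs)
{
	u32 mask = nft_cmp_fast_mask(priv->len);

	/* only the bytes the expression actually loaded take part */
	return (regs[priv->sreg].data[0] & mask) == priv->data;
}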
nft_set_pktinfo(pkt, ops, skb, in, out); + + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); + /* If malformed, drop it */ + if (protohdr < 0) + return -1; + + pkt->tprot = protohdr; + pkt->xt.thoff = thoff; + pkt->xt.fragoff = frag_off; + + return 0; +} + +extern struct nft_af_info nft_af_ipv6; + +#endif diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h new file mode 100644 index 00000000000..5ca3f14f099 --- /dev/null +++ b/include/net/netfilter/nfnetlink_log.h @@ -0,0 +1,17 @@ +#ifndef _KER_NFNETLINK_LOG_H +#define _KER_NFNETLINK_LOG_H + +void +nfulnl_log_packet(struct net *net, + u_int8_t pf, + unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *li_user, + const char *prefix); + +#define NFULNL_COPY_DISABLED 0xff + +#endif /* _KER_NFNETLINK_LOG_H */ + diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h new file mode 100644 index 00000000000..aff88ba9139 --- /dev/null +++ b/include/net/netfilter/nfnetlink_queue.h @@ -0,0 +1,51 @@ +#ifndef _NET_NFNL_QUEUE_H_ +#define _NET_NFNL_QUEUE_H_ + +#include <linux/netfilter/nf_conntrack_common.h> + +struct nf_conn; + +#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT +struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size, + enum ip_conntrack_info *ctinfo); +struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb, + const struct nlattr *attr, + enum ip_conntrack_info *ctinfo); +int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo); +void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, int diff); +int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, + u32 portid, u32 report); +#else +inline struct nf_conn * +nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo) +{ + return NULL; +} + +inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb, + const struct nlattr *attr, + enum ip_conntrack_info *ctinfo) +{ + return NULL; +} + +inline int +nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) +{ + return 0; +} + +inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, int diff) +{ +} + +inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, + u32 portid, u32 report) +{ + return 0; +} +#endif /* NF_CONNTRACK */ +#endif diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h new file mode 100644 index 00000000000..0ee47c3e2e3 --- /dev/null +++ b/include/net/netfilter/nft_meta.h @@ -0,0 +1,36 @@ +#ifndef _NFT_META_H_ +#define _NFT_META_H_ + +struct nft_meta { + enum nft_meta_keys key:8; + union { + enum nft_registers dreg:8; + enum nft_registers sreg:8; + }; +}; + +extern const struct nla_policy nft_meta_policy[]; + +int nft_meta_get_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_meta_set_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_meta_get_dump(struct sk_buff *skb, + const struct nft_expr *expr); + +int nft_meta_set_dump(struct sk_buff *skb, + const struct nft_expr *expr); + +void nft_meta_get_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt); + +void nft_meta_set_eval(const struct nft_expr *expr, + struct nft_data 
data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt); + +#endif diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h new file mode 100644 index 00000000000..36b0da2d55b --- /dev/null +++ b/include/net/netfilter/nft_reject.h @@ -0,0 +1,25 @@ +#ifndef _NFT_REJECT_H_ +#define _NFT_REJECT_H_ + +struct nft_reject { + enum nft_reject_types type:8; + u8 icmp_code; +}; + +extern const struct nla_policy nft_reject_policy[]; + +int nft_reject_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr); + +void nft_reject_ipv4_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt); + +void nft_reject_ipv6_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt); + +#endif diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h new file mode 100644 index 00000000000..9d9756cca01 --- /dev/null +++ b/include/net/netfilter/xt_log.h @@ -0,0 +1,54 @@ +#define S_SIZE (1024 - (sizeof(unsigned int) + 1)) + +struct sbuff { + unsigned int count; + char buf[S_SIZE + 1]; +}; +static struct sbuff emergency, *emergency_ptr = &emergency; + +static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...) +{ + va_list args; + int len; + + if (likely(m->count < S_SIZE)) { + va_start(args, f); + len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args); + va_end(args); + if (likely(m->count + len < S_SIZE)) { + m->count += len; + return 0; + } + } + m->count = S_SIZE; + printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n"); + return -1; +} + +static struct sbuff *sb_open(void) +{ + struct sbuff *m = kmalloc(sizeof(*m), GFP_ATOMIC); + + if (unlikely(!m)) { + local_bh_disable(); + do { + m = xchg(&emergency_ptr, NULL); + } while (!m); + } + m->count = 0; + return m; +} + +static void sb_close(struct sbuff *m) +{ + m->buf[m->count] = 0; + printk("%s\n", m->buf); + + if (likely(m != &emergency)) + kfree(m); + else { + emergency_ptr = m; + local_bh_enable(); + } +} + diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h index 65d594dffbf..79f45e19f31 100644 --- a/include/net/netfilter/xt_rateest.h +++ b/include/net/netfilter/xt_rateest.h @@ -2,16 +2,21 @@ #define _XT_RATEEST_H struct xt_rateest { + /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */ + struct gnet_stats_basic_packed bstats; + spinlock_t lock; + /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */ + struct gnet_stats_rate_est64 rstats; + + /* following fields not accessed in hot path */ struct hlist_node list; char name[IFNAMSIZ]; unsigned int refcnt; - spinlock_t lock; struct gnet_estimator params; - struct gnet_stats_rate_est rstats; - struct gnet_stats_basic bstats; + struct rcu_head rcu; }; -extern struct xt_rateest *xt_rateest_lookup(const char *name); -extern void xt_rateest_put(struct xt_rateest *est); +struct xt_rateest *xt_rateest_lookup(const char *name); +void xt_rateest_put(struct xt_rateest *est); #endif /* _XT_RATEEST_H */ diff --git a/include/net/netlabel.h b/include/net/netlabel.h index 0ca67d73c7a..4fe018c48ed 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h @@ -4,12 +4,12 @@ * The NetLabel system manages static and dynamic label mappings for network * protocols such as CIPSO and RIPSO. 
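xt_log.h gives LOG-style targets a scratch buffer so one packet dump ends up as a single printk() instead of interleaved fragments; sb_open() falls back to a static emergency buffer (with BHs disabled) if the GFP_ATOMIC allocation fails, and sb_close() emits the assembled line and releases whichever buffer was used. A hedged usage sketch (the dump_packet() helper is made up):

static void dump_packet(const struct sk_buff *skb, const char *prefix)
{
	struct sbuff *m = sb_open();

	sb_add(m, "%sIN=%s ", prefix, skb->dev ? skb->dev->name : "");
	sb_add(m, "LEN=%u ", skb->len);
	/* ... further header fields ... */
	sb_close(m);		/* one printk for the whole line */
}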
* - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * */ /* - * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 + * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,8 +22,7 @@ * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. * */ @@ -31,10 +30,14 @@ #define _NETLABEL_H #include <linux/types.h> +#include <linux/slab.h> #include <linux/net.h> #include <linux/skbuff.h> +#include <linux/in.h> +#include <linux/in6.h> #include <net/netlink.h> -#include <asm/atomic.h> +#include <net/request_sock.h> +#include <linux/atomic.h> struct cipso_v4_doi; @@ -72,8 +75,10 @@ struct cipso_v4_doi; /* NetLabel NETLINK protocol version * 1: initial version * 2: added static labels for unlabeled connections + * 3: network selectors added to the NetLabel/LSM domain mapping and the + * CIPSO_V4_MAP_LOCAL CIPSO mapping was added */ -#define NETLBL_PROTO_VERSION 2 +#define NETLBL_PROTO_VERSION 3 /* NetLabel NETLINK types/families */ #define NETLBL_NLTYPE_NONE 0 @@ -87,6 +92,8 @@ struct cipso_v4_doi; #define NETLBL_NLTYPE_CIPSOV6_NAME "NLBL_CIPSOv6" #define NETLBL_NLTYPE_UNLABELED 5 #define NETLBL_NLTYPE_UNLABELED_NAME "NLBL_UNLBL" +#define NETLBL_NLTYPE_ADDRSELECT 6 +#define NETLBL_NLTYPE_ADDRSELECT_NAME "NLBL_ADRSEL" /* * NetLabel - Kernel API for accessing the network packet label mappings. @@ -102,7 +109,8 @@ struct cipso_v4_doi; /* NetLabel audit information */ struct netlbl_audit { u32 secid; - uid_t loginuid; + kuid_t loginuid; + unsigned int sessionid; }; /* @@ -162,7 +170,7 @@ struct netlbl_lsm_secattr_catmap { /** * struct netlbl_lsm_secattr - NetLabel LSM security attributes - * @flags: indicate which attributes are contained in this structure + * @flags: indicate structure attributes, see NETLBL_SECATTR_* * @type: indicate the NLTYPE of the attributes * @domain: the NetLabel LSM domain * @cache: NetLabel LSM specific cache @@ -180,21 +188,26 @@ struct netlbl_lsm_secattr_catmap { * NetLabel itself when returning security attributes to the LSM. 
* */ +struct netlbl_lsm_secattr { + u32 flags; + /* bitmap values for 'flags' */ #define NETLBL_SECATTR_NONE 0x00000000 #define NETLBL_SECATTR_DOMAIN 0x00000001 +#define NETLBL_SECATTR_DOMAIN_CPY (NETLBL_SECATTR_DOMAIN | \ + NETLBL_SECATTR_FREE_DOMAIN) #define NETLBL_SECATTR_CACHE 0x00000002 #define NETLBL_SECATTR_MLS_LVL 0x00000004 #define NETLBL_SECATTR_MLS_CAT 0x00000008 #define NETLBL_SECATTR_SECID 0x00000010 + /* bitmap meta-values for 'flags' */ +#define NETLBL_SECATTR_FREE_DOMAIN 0x01000000 #define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \ NETLBL_SECATTR_MLS_CAT | \ NETLBL_SECATTR_SECID) -struct netlbl_lsm_secattr { - u32 flags; u32 type; char *domain; struct netlbl_lsm_cache *cache; - union { + struct { struct { struct netlbl_lsm_secattr_catmap *cat; u32 lvl; @@ -303,7 +316,8 @@ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr) */ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) { - kfree(secattr->domain); + if (secattr->flags & NETLBL_SECATTR_FREE_DOMAIN) + kfree(secattr->domain); if (secattr->flags & NETLBL_SECATTR_CACHE) netlbl_secattr_cache_free(secattr->cache); if (secattr->flags & NETLBL_SECATTR_MLS_CAT) @@ -342,16 +356,37 @@ static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr) /* * LSM configuration operations */ -int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info); -int netlbl_cfg_unlbl_add_map(const char *domain, +int netlbl_cfg_map_del(const char *domain, + u16 family, + const void *addr, + const void *mask, + struct netlbl_audit *audit_info); +int netlbl_cfg_unlbl_map_add(const char *domain, + u16 family, + const void *addr, + const void *mask, struct netlbl_audit *audit_info); +int netlbl_cfg_unlbl_static_add(struct net *net, + const char *dev_name, + const void *addr, + const void *mask, + u16 family, + u32 secid, + struct netlbl_audit *audit_info); +int netlbl_cfg_unlbl_static_del(struct net *net, + const char *dev_name, + const void *addr, + const void *mask, + u16 family, + struct netlbl_audit *audit_info); int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info); -int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def, +void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info); +int netlbl_cfg_cipsov4_map_add(u32 doi, const char *domain, + const struct in_addr *addr, + const struct in_addr *mask, struct netlbl_audit *audit_info); -int netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info); - /* * LSM security attribute operations */ @@ -372,13 +407,24 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap, */ int netlbl_enabled(void); int netlbl_sock_setattr(struct sock *sk, + u16 family, const struct netlbl_lsm_secattr *secattr); +void netlbl_sock_delattr(struct sock *sk); int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr); +int netlbl_conn_setattr(struct sock *sk, + struct sockaddr *addr, + const struct netlbl_lsm_secattr *secattr); +int netlbl_req_setattr(struct request_sock *req, + const struct netlbl_lsm_secattr *secattr); +void netlbl_req_delattr(struct request_sock *req); +int netlbl_skbuff_setattr(struct sk_buff *skb, + u16 family, + const struct netlbl_lsm_secattr *secattr); int netlbl_skbuff_getattr(const struct sk_buff *skb, u16 family, struct netlbl_lsm_secattr *secattr); -void netlbl_skbuff_err(struct sk_buff *skb, int error); +void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway); /* * LSM label mapping cache operations @@ 
-386,33 +432,66 @@ void netlbl_skbuff_err(struct sk_buff *skb, int error); void netlbl_cache_invalidate(void); int netlbl_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr); + +/* + * Protocol engine operations + */ +struct audit_buffer *netlbl_audit_start(int type, + struct netlbl_audit *audit_info); #else static inline int netlbl_cfg_map_del(const char *domain, + u16 family, + const void *addr, + const void *mask, struct netlbl_audit *audit_info) { return -ENOSYS; } -static inline int netlbl_cfg_unlbl_add_map(const char *domain, +static inline int netlbl_cfg_unlbl_map_add(const char *domain, + u16 family, + void *addr, + void *mask, struct netlbl_audit *audit_info) { return -ENOSYS; } -static inline int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def, - struct netlbl_audit *audit_info) +static inline int netlbl_cfg_unlbl_static_add(struct net *net, + const char *dev_name, + const void *addr, + const void *mask, + u16 family, + u32 secid, + struct netlbl_audit *audit_info) { return -ENOSYS; } -static inline int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def, - const char *domain, - struct netlbl_audit *audit_info) +static inline int netlbl_cfg_unlbl_static_del(struct net *net, + const char *dev_name, + const void *addr, + const void *mask, + u16 family, + struct netlbl_audit *audit_info) { return -ENOSYS; } -static inline int netlbl_cfg_cipsov4_del(u32 doi, +static inline int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { return -ENOSYS; } +static inline void netlbl_cfg_cipsov4_del(u32 doi, + struct netlbl_audit *audit_info) +{ + return; +} +static inline int netlbl_cfg_cipsov4_map_add(u32 doi, + const char *domain, + const struct in_addr *addr, + const struct in_addr *mask, + struct netlbl_audit *audit_info) +{ + return -ENOSYS; +} static inline int netlbl_secattr_catmap_walk( struct netlbl_lsm_secattr_catmap *catmap, u32 offset) @@ -445,22 +524,49 @@ static inline int netlbl_enabled(void) return 0; } static inline int netlbl_sock_setattr(struct sock *sk, - const struct netlbl_lsm_secattr *secattr) + u16 family, + const struct netlbl_lsm_secattr *secattr) { return -ENOSYS; } +static inline void netlbl_sock_delattr(struct sock *sk) +{ +} static inline int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { return -ENOSYS; } +static inline int netlbl_conn_setattr(struct sock *sk, + struct sockaddr *addr, + const struct netlbl_lsm_secattr *secattr) +{ + return -ENOSYS; +} +static inline int netlbl_req_setattr(struct request_sock *req, + const struct netlbl_lsm_secattr *secattr) +{ + return -ENOSYS; +} +static inline void netlbl_req_delattr(struct request_sock *req) +{ + return; +} +static inline int netlbl_skbuff_setattr(struct sk_buff *skb, + u16 family, + const struct netlbl_lsm_secattr *secattr) +{ + return -ENOSYS; +} static inline int netlbl_skbuff_getattr(const struct sk_buff *skb, u16 family, struct netlbl_lsm_secattr *secattr) { return -ENOSYS; } -static inline void netlbl_skbuff_err(struct sk_buff *skb, int error) +static inline void netlbl_skbuff_err(struct sk_buff *skb, + int error, + int gateway) { return; } @@ -473,6 +579,11 @@ static inline int netlbl_cache_add(const struct sk_buff *skb, { return 0; } +static inline struct audit_buffer *netlbl_audit_start(int type, + struct netlbl_audit *audit_info) +{ + return NULL; +} #endif /* CONFIG_NETLABEL */ #endif /* _NETLABEL_H */ diff --git a/include/net/netlink.h b/include/net/netlink.h index a5506c42f03..2b47eaadba8 100644 --- 
a/include/net/netlink.h +++ b/include/net/netlink.h @@ -35,7 +35,7 @@ * nlmsg_new() create a new netlink message * nlmsg_put() add a netlink message to an skb * nlmsg_put_answer() callback based nlmsg_put() - * nlmsg_end() finanlize netlink message + * nlmsg_end() finalize netlink message * nlmsg_get_pos() return current position in message * nlmsg_trim() trim part of message * nlmsg_cancel() cancel message construction @@ -98,30 +98,17 @@ * nla_put_u16(skb, type, value) add u16 attribute to skb * nla_put_u32(skb, type, value) add u32 attribute to skb * nla_put_u64(skb, type, value) add u64 attribute to skb + * nla_put_s8(skb, type, value) add s8 attribute to skb + * nla_put_s16(skb, type, value) add s16 attribute to skb + * nla_put_s32(skb, type, value) add s32 attribute to skb + * nla_put_s64(skb, type, value) add s64 attribute to skb * nla_put_string(skb, type, str) add string attribute to skb * nla_put_flag(skb, type) add flag attribute to skb * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb * - * Exceptions Based Attribute Construction: - * NLA_PUT(skb, type, len, data) add attribute to skb - * NLA_PUT_U8(skb, type, value) add u8 attribute to skb - * NLA_PUT_U16(skb, type, value) add u16 attribute to skb - * NLA_PUT_U32(skb, type, value) add u32 attribute to skb - * NLA_PUT_U64(skb, type, value) add u64 attribute to skb - * NLA_PUT_STRING(skb, type, str) add string attribute to skb - * NLA_PUT_FLAG(skb, type) add flag attribute to skb - * NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb - * - * The meaning of these functions is equal to their lower case - * variants but they jump to the label nla_put_failure in case - * of a failure. - * * Nested Attributes Construction: * nla_nest_start(skb, type) start a nested attribute * nla_nest_end(skb, nla) finalize a nested attribute - * nla_nest_compat_start(skb, type, start a nested compat attribute - * len, data) - * nla_nest_compat_end(skb, type) finalize a nested compat attribute * nla_nest_cancel(skb, nla) cancel nested attribute construction * * Attribute Length Calculations: @@ -138,6 +125,10 @@ * nla_get_u16(nla) get payload for a u16 attribute * nla_get_u32(nla) get payload for a u32 attribute * nla_get_u64(nla) get payload for a u64 attribute + * nla_get_s8(nla) get payload for a s8 attribute + * nla_get_s16(nla) get payload for a s16 attribute + * nla_get_s32(nla) get payload for a s32 attribute + * nla_get_s64(nla) get payload for a s64 attribute * nla_get_flag(nla) return 1 if flag is true * nla_get_msecs(nla) get payload for a msecs attribute * @@ -156,7 +147,6 @@ * nla_find_nested() find attribute in nested attributes * nla_parse() parse and validate stream of attrs * nla_parse_nested() parse nested attribuets - * nla_parse_nested_compat() parse nested compat attributes * nla_for_each_attr() loop over all attributes * nla_for_each_nested() loop over the nested attributes *========================================================================= @@ -178,6 +168,10 @@ enum { NLA_NESTED_COMPAT, NLA_NUL_STRING, NLA_BINARY, + NLA_S8, + NLA_S16, + NLA_S32, + NLA_S64, __NLA_TYPE_MAX, }; @@ -196,11 +190,20 @@ enum { * NLA_NUL_STRING Maximum length of string (excluding NUL) * NLA_FLAG Unused * NLA_BINARY Maximum length of attribute payload - * NLA_NESTED_COMPAT Exact length of structure payload - * All other Exact length of attribute payload + * NLA_NESTED Don't use `len' field -- length verification is + * done by checking len of nested header (or empty) + * NLA_NESTED_COMPAT Minimum length of structure payload 
+ * NLA_U8, NLA_U16, + * NLA_U32, NLA_U64, + * NLA_S8, NLA_S16, + * NLA_S32, NLA_S64, + * NLA_MSECS Leaving the length field zero will verify the + * given type fits, using it verifies minimum length + * just like "All other" + * All other Minimum length of attribute payload * * Example: - * static struct nla_policy my_policy[ATTR_MAX+1] __read_mostly = { + * static const struct nla_policy my_policy[ATTR_MAX+1] = { * [ATTR_FOO] = { .type = NLA_U16 }, * [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ }, * [ATTR_BAZ] = { .len = sizeof(struct mystruct) }, @@ -214,49 +217,39 @@ struct nla_policy { /** * struct nl_info - netlink source information * @nlh: Netlink message header of original request - * @pid: Netlink PID of requesting application + * @portid: Netlink PORTID of requesting application */ struct nl_info { struct nlmsghdr *nlh; struct net *nl_net; - u32 pid; + u32 portid; }; -extern int netlink_rcv_skb(struct sk_buff *skb, - int (*cb)(struct sk_buff *, - struct nlmsghdr *)); -extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb, - u32 pid, unsigned int group, int report, - gfp_t flags); - -extern int nla_validate(struct nlattr *head, int len, int maxtype, - const struct nla_policy *policy); -extern int nla_parse(struct nlattr *tb[], int maxtype, - struct nlattr *head, int len, - const struct nla_policy *policy); -extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype); -extern size_t nla_strlcpy(char *dst, const struct nlattr *nla, - size_t dstsize); -extern int nla_memcpy(void *dest, struct nlattr *src, int count); -extern int nla_memcmp(const struct nlattr *nla, const void *data, - size_t size); -extern int nla_strcmp(const struct nlattr *nla, const char *str); -extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype, - int attrlen); -extern void * __nla_reserve_nohdr(struct sk_buff *skb, int attrlen); -extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype, - int attrlen); -extern void * nla_reserve_nohdr(struct sk_buff *skb, int attrlen); -extern void __nla_put(struct sk_buff *skb, int attrtype, - int attrlen, const void *data); -extern void __nla_put_nohdr(struct sk_buff *skb, int attrlen, - const void *data); -extern int nla_put(struct sk_buff *skb, int attrtype, - int attrlen, const void *data); -extern int nla_put_nohdr(struct sk_buff *skb, int attrlen, - const void *data); -extern int nla_append(struct sk_buff *skb, int attrlen, - const void *data); +int netlink_rcv_skb(struct sk_buff *skb, + int (*cb)(struct sk_buff *, struct nlmsghdr *)); +int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, + unsigned int group, int report, gfp_t flags); + +int nla_validate(const struct nlattr *head, int len, int maxtype, + const struct nla_policy *policy); +int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, + int len, const struct nla_policy *policy); +int nla_policy_len(const struct nla_policy *, int); +struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype); +size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize); +int nla_memcpy(void *dest, const struct nlattr *src, int count); +int nla_memcmp(const struct nlattr *nla, const void *data, size_t size); +int nla_strcmp(const struct nlattr *nla, const char *str); +struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen); +void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen); +struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen); +void *nla_reserve_nohdr(struct 
sk_buff *skb, int attrlen); +void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, + const void *data); +void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); +int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data); +int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); +int nla_append(struct sk_buff *skb, int attrlen, const void *data); /************************************************************************** * Netlink Messages @@ -291,7 +284,7 @@ static inline int nlmsg_padlen(int payload) /** * nlmsg_data - head of message payload - * @nlh: netlink messsage header + * @nlh: netlink message header */ static inline void *nlmsg_data(const struct nlmsghdr *nlh) { @@ -336,7 +329,7 @@ static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen) */ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining) { - return (remaining >= sizeof(struct nlmsghdr) && + return (remaining >= (int) sizeof(struct nlmsghdr) && nlh->nlmsg_len >= sizeof(struct nlmsghdr) && nlh->nlmsg_len <= remaining); } @@ -349,7 +342,8 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining) * Returns the next netlink message in the message stream and * decrements remaining by the size of the current message. */ -static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining) +static inline struct nlmsghdr * +nlmsg_next(const struct nlmsghdr *nlh, int *remaining) { int totlen = NLMSG_ALIGN(nlh->nlmsg_len); @@ -368,7 +362,7 @@ static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining) * * See nla_parse() */ -static inline int nlmsg_parse(struct nlmsghdr *nlh, int hdrlen, +static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[], int maxtype, const struct nla_policy *policy) { @@ -387,7 +381,7 @@ static inline int nlmsg_parse(struct nlmsghdr *nlh, int hdrlen, * * Returns the first attribute which matches the specified type. */ -static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh, +static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh, int hdrlen, int attrtype) { return nla_find(nlmsg_attrdata(nlh, hdrlen), @@ -401,7 +395,8 @@ static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh, * @maxtype: maximum attribute type to be expected * @policy: validation policy */ -static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype, +static inline int nlmsg_validate(const struct nlmsghdr *nlh, + int hdrlen, int maxtype, const struct nla_policy *policy) { if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) @@ -417,7 +412,7 @@ static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype, * * Returns 1 if a report back to the application is requested. 
*/ -static inline int nlmsg_report(struct nlmsghdr *nlh) +static inline int nlmsg_report(const struct nlmsghdr *nlh) { return !!(nlh->nlmsg_flags & NLM_F_ECHO); } @@ -433,45 +428,10 @@ static inline int nlmsg_report(struct nlmsghdr *nlh) nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \ nlmsg_attrlen(nlh, hdrlen), rem) -#if 0 -/* FIXME: Enable once all users have been converted */ - -/** - * __nlmsg_put - Add a new netlink message to an skb - * @skb: socket buffer to store message in - * @pid: netlink process id - * @seq: sequence number of message - * @type: message type - * @payload: length of message payload - * @flags: message flags - * - * The caller is responsible to ensure that the skb provides enough - * tailroom for both the netlink header and payload. - */ -static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid, - u32 seq, int type, int payload, - int flags) -{ - struct nlmsghdr *nlh; - - nlh = (struct nlmsghdr *) skb_put(skb, nlmsg_total_size(payload)); - nlh->nlmsg_type = type; - nlh->nlmsg_len = nlmsg_msg_size(payload); - nlh->nlmsg_flags = flags; - nlh->nlmsg_pid = pid; - nlh->nlmsg_seq = seq; - - memset((unsigned char *) nlmsg_data(nlh) + payload, 0, - nlmsg_padlen(payload)); - - return nlh; -} -#endif - /** * nlmsg_put - Add a new netlink message to an skb * @skb: socket buffer to store message in - * @pid: netlink process id + * @portid: netlink process id * @seq: sequence number of message * @type: message type * @payload: length of message payload @@ -480,13 +440,13 @@ static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid, * Returns NULL if the tailroom of the skb is insufficient to store * the message header and payload. */ -static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, +static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int payload, int flags) { if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload))) return NULL; - return __nlmsg_put(skb, pid, seq, type, payload, flags); + return __nlmsg_put(skb, portid, seq, type, payload, flags); } /** @@ -505,7 +465,7 @@ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb, int type, int payload, int flags) { - return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, + return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, type, payload, flags); } @@ -556,14 +516,12 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb) * @skb: socket buffer the message is stored in * @mark: mark to trim to * - * Trims the message to the provided mark. Returns -1. + * Trims the message to the provided mark. */ -static inline int nlmsg_trim(struct sk_buff *skb, const void *mark) +static inline void nlmsg_trim(struct sk_buff *skb, const void *mark) { if (mark) skb_trim(skb, (unsigned char *) mark - skb->data); - - return -1; } /** @@ -572,11 +530,11 @@ static inline int nlmsg_trim(struct sk_buff *skb, const void *mark) * @nlh: netlink message header * * Removes the complete netlink message including all - * attributes from the socket buffer again. Returns -1. + * attributes from the socket buffer again. 
*/ -static inline int nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) +static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) { - return nlmsg_trim(skb, nlh); + nlmsg_trim(skb, nlh); } /** @@ -592,18 +550,18 @@ static inline void nlmsg_free(struct sk_buff *skb) * nlmsg_multicast - multicast a netlink message * @sk: netlink socket to spread messages to * @skb: netlink message as socket buffer - * @pid: own netlink pid to avoid sending to yourself + * @portid: own netlink portid to avoid sending to yourself * @group: multicast group id * @flags: allocation flags */ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, - u32 pid, unsigned int group, gfp_t flags) + u32 portid, unsigned int group, gfp_t flags) { int err; NETLINK_CB(skb).dst_group = group; - err = netlink_broadcast(sk, skb, pid, group, flags); + err = netlink_broadcast(sk, skb, portid, group, flags); if (err > 0) err = 0; @@ -614,13 +572,13 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, * nlmsg_unicast - unicast a netlink message * @sk: netlink socket to spread message to * @skb: netlink message as socket buffer - * @pid: netlink pid of the destination socket + * @portid: netlink portid of the destination socket */ -static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid) +static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid) { int err; - err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT); + err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT); if (err > 0) err = 0; @@ -639,6 +597,30 @@ static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid) nlmsg_ok(pos, rem); \ pos = nlmsg_next(pos, &(rem))) +/** + * nl_dump_check_consistent - check if sequence is consistent and advertise if not + * @cb: netlink callback structure that stores the sequence number + * @nlh: netlink message header to write the flag to + * + * This function checks if the sequence (generation) number changed during dump + * and if it did, advertises it in the netlink message header. + * + * The correct way to use it is to set cb->seq to the generation counter when + * all locks for dumping have been acquired, and then call this function for + * each message that is generated. + * + * Note that due to initialisation concerns, 0 is an invalid sequence number + * and must not be used by code that uses this functionality. + */ +static inline void +nl_dump_check_consistent(struct netlink_callback *cb, + struct nlmsghdr *nlh) +{ + if (cb->prev_seq && cb->seq != cb->prev_seq) + nlh->nlmsg_flags |= NLM_F_DUMP_INTR; + cb->prev_seq = cb->seq; +} + /************************************************************************** * Netlink Attributes **************************************************************************/ @@ -704,7 +686,7 @@ static inline int nla_len(const struct nlattr *nla) */ static inline int nla_ok(const struct nlattr *nla, int remaining) { - return remaining >= sizeof(*nla) && + return remaining >= (int) sizeof(*nla) && nla->nla_len >= sizeof(*nla) && nla->nla_len <= remaining; } @@ -732,7 +714,8 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining) * * Returns the first attribute which matches the specified type. 
*/ -static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype) +static inline struct nlattr * +nla_find_nested(const struct nlattr *nla, int attrtype) { return nla_find(nla_data(nla), nla_len(nla), attrtype); } @@ -747,45 +730,13 @@ static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype) * See nla_parse() */ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype, - struct nlattr *nla, + const struct nlattr *nla, const struct nla_policy *policy) { return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy); } /** - * nla_parse_nested_compat - parse nested compat attributes - * @tb: destination array with maxtype+1 elements - * @maxtype: maximum attribute type to be expected - * @nla: attribute containing the nested attributes - * @data: pointer to point to contained structure - * @len: length of contained structure - * @policy: validation policy - * - * Parse a nested compat attribute. The compat attribute contains a structure - * and optionally a set of nested attributes. On success the data pointer - * points to the nested data and tb contains the parsed attributes - * (see nla_parse). - */ -static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype, - struct nlattr *nla, - const struct nla_policy *policy, - int len) -{ - if (nla_len(nla) < len) - return -1; - if (nla_len(nla) >= NLA_ALIGN(len) + sizeof(struct nlattr)) - return nla_parse_nested(tb, maxtype, - nla_data(nla) + NLA_ALIGN(len), - policy); - memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); - return 0; -} - -#define nla_parse_nested_compat(tb, maxtype, nla, policy, data, len) \ -({ data = nla_len(nla) >= len ? nla_data(nla) : NULL; \ - __nla_parse_nested_compat(tb, maxtype, nla, policy, len); }) -/** * nla_put_u8 - Add a u8 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -808,6 +759,39 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) } /** + * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) +{ + return nla_put(skb, attrtype, sizeof(__be16), &value); +} + +/** + * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) +{ + return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) +{ + return nla_put(skb, attrtype, sizeof(__le16), &value); +} + +/** * nla_put_u32 - Add a u32 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -819,7 +803,40 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) } /** - * nla_put_64 - Add a u64 netlink attribute to a socket buffer + * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be32(struct sk_buff *skb, int 
attrtype, __be32 value) +{ + return nla_put(skb, attrtype, sizeof(__be32), &value); +} + +/** + * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) +{ + return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) +{ + return nla_put(skb, attrtype, sizeof(__le32), &value); +} + +/** + * nla_put_u64 - Add a u64 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value @@ -830,6 +847,83 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) } /** + * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) +{ + return nla_put(skb, attrtype, sizeof(__be64), &value); +} + +/** + * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) +{ + return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value) +{ + return nla_put(skb, attrtype, sizeof(__le64), &value); +} + +/** + * nla_put_s8 - Add a s8 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) +{ + return nla_put(skb, attrtype, sizeof(s8), &value); +} + +/** + * nla_put_s16 - Add a s16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) +{ + return nla_put(skb, attrtype, sizeof(s16), &value); +} + +/** + * nla_put_s32 - Add a s32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) +{ + return nla_put(skb, attrtype, sizeof(s32), &value); +} + +/** + * nla_put_s64 - Add a s64 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value) +{ + return nla_put(skb, attrtype, sizeof(s64), &value); +} + +/** * nla_put_string - Add a string netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -864,53 +958,11 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, return nla_put(skb, attrtype, 
sizeof(u64), &tmp); } -#define NLA_PUT(skb, attrtype, attrlen, data) \ - do { \ - if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \ - goto nla_put_failure; \ - } while(0) - -#define NLA_PUT_TYPE(skb, type, attrtype, value) \ - do { \ - type __tmp = value; \ - NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \ - } while(0) - -#define NLA_PUT_U8(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u8, attrtype, value) - -#define NLA_PUT_U16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u16, attrtype, value) - -#define NLA_PUT_LE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __le16, attrtype, value) - -#define NLA_PUT_BE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be16, attrtype, value) - -#define NLA_PUT_U32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u32, attrtype, value) - -#define NLA_PUT_BE32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be32, attrtype, value) - -#define NLA_PUT_U64(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u64, attrtype, value) - -#define NLA_PUT_STRING(skb, attrtype, value) \ - NLA_PUT(skb, attrtype, strlen(value) + 1, value) - -#define NLA_PUT_FLAG(skb, attrtype) \ - NLA_PUT(skb, attrtype, 0, NULL) - -#define NLA_PUT_MSECS(skb, attrtype, jiffies) \ - NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies)) - /** * nla_get_u32 - return payload of u32 attribute * @nla: u32 netlink attribute */ -static inline u32 nla_get_u32(struct nlattr *nla) +static inline u32 nla_get_u32(const struct nlattr *nla) { return *(u32 *) nla_data(nla); } @@ -919,7 +971,7 @@ static inline u32 nla_get_u32(struct nlattr *nla) * nla_get_be32 - return payload of __be32 attribute * @nla: __be32 netlink attribute */ -static inline __be32 nla_get_be32(struct nlattr *nla) +static inline __be32 nla_get_be32(const struct nlattr *nla) { return *(__be32 *) nla_data(nla); } @@ -928,7 +980,7 @@ static inline __be32 nla_get_be32(struct nlattr *nla) * nla_get_u16 - return payload of u16 attribute * @nla: u16 netlink attribute */ -static inline u16 nla_get_u16(struct nlattr *nla) +static inline u16 nla_get_u16(const struct nlattr *nla) { return *(u16 *) nla_data(nla); } @@ -937,7 +989,7 @@ static inline u16 nla_get_u16(struct nlattr *nla) * nla_get_be16 - return payload of __be16 attribute * @nla: __be16 netlink attribute */ -static inline __be16 nla_get_be16(struct nlattr *nla) +static inline __be16 nla_get_be16(const struct nlattr *nla) { return *(__be16 *) nla_data(nla); } @@ -946,7 +998,7 @@ static inline __be16 nla_get_be16(struct nlattr *nla) * nla_get_le16 - return payload of __le16 attribute * @nla: __le16 netlink attribute */ -static inline __le16 nla_get_le16(struct nlattr *nla) +static inline __le16 nla_get_le16(const struct nlattr *nla) { return *(__le16 *) nla_data(nla); } @@ -955,7 +1007,7 @@ static inline __le16 nla_get_le16(struct nlattr *nla) * nla_get_u8 - return payload of u8 attribute * @nla: u8 netlink attribute */ -static inline u8 nla_get_u8(struct nlattr *nla) +static inline u8 nla_get_u8(const struct nlattr *nla) { return *(u8 *) nla_data(nla); } @@ -964,7 +1016,7 @@ static inline u8 nla_get_u8(struct nlattr *nla) * nla_get_u64 - return payload of u64 attribute * @nla: u64 netlink attribute */ -static inline u64 nla_get_u64(struct nlattr *nla) +static inline u64 nla_get_u64(const struct nlattr *nla) { u64 tmp; @@ -974,10 +1026,63 @@ static inline u64 nla_get_u64(struct nlattr *nla) } /** + * nla_get_be64 - return payload of __be64 attribute + * @nla: __be64 netlink attribute + */ +static inline __be64 nla_get_be64(const struct nlattr *nla) +{ + __be64 tmp; + + nla_memcpy(&tmp, nla, 
sizeof(tmp)); + + return tmp; +} + +/** + * nla_get_s32 - return payload of s32 attribute + * @nla: s32 netlink attribute + */ +static inline s32 nla_get_s32(const struct nlattr *nla) +{ + return *(s32 *) nla_data(nla); +} + +/** + * nla_get_s16 - return payload of s16 attribute + * @nla: s16 netlink attribute + */ +static inline s16 nla_get_s16(const struct nlattr *nla) +{ + return *(s16 *) nla_data(nla); +} + +/** + * nla_get_s8 - return payload of s8 attribute + * @nla: s8 netlink attribute + */ +static inline s8 nla_get_s8(const struct nlattr *nla) +{ + return *(s8 *) nla_data(nla); +} + +/** + * nla_get_s64 - return payload of s64 attribute + * @nla: s64 netlink attribute + */ +static inline s64 nla_get_s64(const struct nlattr *nla) +{ + s64 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; +} + +/** * nla_get_flag - return payload of flag attribute * @nla: flag netlink attribute */ -static inline int nla_get_flag(struct nlattr *nla) +static inline int nla_get_flag(const struct nlattr *nla) { return !!nla; } @@ -988,7 +1093,7 @@ static inline int nla_get_flag(struct nlattr *nla) * * Returns the number of milliseconds in jiffies. */ -static inline unsigned long nla_get_msecs(struct nlattr *nla) +static inline unsigned long nla_get_msecs(const struct nlattr *nla) { u64 msecs = nla_get_u64(nla); @@ -1029,61 +1134,16 @@ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start) } /** - * nla_nest_compat_start - Start a new level of nested compat attributes - * @skb: socket buffer to add attributes to - * @attrtype: attribute type of container - * @attrlen: length of structure - * @data: pointer to structure - * - * Start a nested compat attribute that contains both a structure and - * a set of nested attributes. - * - * Returns the container attribute - */ -static inline struct nlattr *nla_nest_compat_start(struct sk_buff *skb, - int attrtype, int attrlen, - const void *data) -{ - struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb); - - if (nla_put(skb, attrtype, attrlen, data) < 0) - return NULL; - if (nla_nest_start(skb, attrtype) == NULL) { - nlmsg_trim(skb, start); - return NULL; - } - return start; -} - -/** - * nla_nest_compat_end - Finalize nesting of compat attributes - * @skb: socket buffer the attributes are stored in - * @start: container attribute - * - * Corrects the container attribute header to include the all - * appeneded attributes. - * - * Returns the total data length of the skb. - */ -static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start) -{ - struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len); - - start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start; - return nla_nest_end(skb, nest); -} - -/** * nla_nest_cancel - Cancel nesting of attributes * @skb: socket buffer the message is stored in * @start: container attribute * * Removes the container attribute and including all nested - * attributes. Returns -1. + * attributes. Returns -EMSGSIZE */ -static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) +static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) { - return nlmsg_trim(skb, start); + nlmsg_trim(skb, start); } /** @@ -1098,7 +1158,7 @@ static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) * * Returns 0 on success or a negative error code. 
*/ -static inline int nla_validate_nested(struct nlattr *start, int maxtype, +static inline int nla_validate_nested(const struct nlattr *start, int maxtype, const struct nla_policy *policy) { return nla_validate(nla_data(start), nla_len(start), maxtype, policy); diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h new file mode 100644 index 00000000000..773cce308bc --- /dev/null +++ b/include/net/netns/conntrack.h @@ -0,0 +1,112 @@ +#ifndef __NETNS_CONNTRACK_H +#define __NETNS_CONNTRACK_H + +#include <linux/list.h> +#include <linux/list_nulls.h> +#include <linux/atomic.h> +#include <linux/netfilter/nf_conntrack_tcp.h> +#include <linux/seqlock.h> + +struct ctl_table_header; +struct nf_conntrack_ecache; + +struct nf_proto_net { +#ifdef CONFIG_SYSCTL + struct ctl_table_header *ctl_table_header; + struct ctl_table *ctl_table; +#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT + struct ctl_table_header *ctl_compat_header; + struct ctl_table *ctl_compat_table; +#endif +#endif + unsigned int users; +}; + +struct nf_generic_net { + struct nf_proto_net pn; + unsigned int timeout; +}; + +struct nf_tcp_net { + struct nf_proto_net pn; + unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX]; + unsigned int tcp_loose; + unsigned int tcp_be_liberal; + unsigned int tcp_max_retrans; +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED, + UDP_CT_REPLIED, + UDP_CT_MAX +}; + +struct nf_udp_net { + struct nf_proto_net pn; + unsigned int timeouts[UDP_CT_MAX]; +}; + +struct nf_icmp_net { + struct nf_proto_net pn; + unsigned int timeout; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; +#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) + struct ctl_table_header *ctl_table_header; + struct ctl_table *ctl_table; +#endif +}; + +struct ct_pcpu { + spinlock_t lock; + struct hlist_nulls_head unconfirmed; + struct hlist_nulls_head dying; + struct hlist_nulls_head tmpl; +}; + +struct netns_ct { + atomic_t count; + unsigned int expect_count; +#ifdef CONFIG_SYSCTL + struct ctl_table_header *sysctl_header; + struct ctl_table_header *acct_sysctl_header; + struct ctl_table_header *tstamp_sysctl_header; + struct ctl_table_header *event_sysctl_header; + struct ctl_table_header *helper_sysctl_header; +#endif + char *slabname; + unsigned int sysctl_log_invalid; /* Log invalid packets */ + unsigned int sysctl_events_retry_timeout; + int sysctl_events; + int sysctl_acct; + int sysctl_auto_assign_helper; + bool auto_assign_helper_warned; + int sysctl_tstamp; + int sysctl_checksum; + + unsigned int htable_size; + seqcount_t generation; + struct kmem_cache *nf_conntrack_cachep; + struct hlist_nulls_head *hash; + struct hlist_head *expect_hash; + struct ct_pcpu __percpu *pcpu_lists; + struct ip_conntrack_stat __percpu *stat; + struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; + struct nf_exp_event_notifier __rcu *nf_expect_event_cb; + struct nf_ip_net nf_ct_proto; +#if defined(CONFIG_NF_CONNTRACK_LABELS) + unsigned int labels_used; + u8 label_words; +#endif +#ifdef CONFIG_NF_NAT_NEEDED + struct hlist_head *nat_bysource; + unsigned int nat_htable_size; +#endif +}; +#endif diff --git a/include/net/netns/core.h b/include/net/netns/core.h new file mode 100644 index 00000000000..78eb1ff7547 --- /dev/null +++ b/include/net/netns/core.h @@ -0,0 +1,16 @@ +#ifndef __NETNS_CORE_H__ +#define __NETNS_CORE_H__ + +struct ctl_table_header; +struct prot_inuse; + +struct netns_core { + /* core sysctls */ + 
struct ctl_table_header *sysctl_hdr; + + int sysctl_somaxconn; + + struct prot_inuse __percpu *inuse; +}; + +#endif diff --git a/include/net/netns/dccp.h b/include/net/netns/dccp.h new file mode 100644 index 00000000000..98d2a7ce1f7 --- /dev/null +++ b/include/net/netns/dccp.h @@ -0,0 +1,11 @@ +#ifndef __NETNS_DCCP_H__ +#define __NETNS_DCCP_H__ + +struct sock; + +struct netns_dccp { + struct sock *v4_ctl_sk; + struct sock *v6_ctl_sk; +}; + +#endif diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h new file mode 100644 index 00000000000..0931618c0f7 --- /dev/null +++ b/include/net/netns/generic.h @@ -0,0 +1,48 @@ +/* + * generic net pointers + */ + +#ifndef __NET_GENERIC_H__ +#define __NET_GENERIC_H__ + +#include <linux/bug.h> +#include <linux/rcupdate.h> + +/* + * Generic net pointers are to be used by modules to put some private + * stuff on the struct net without explicit struct net modification + * + * The rules are simple: + * 1. set pernet_operations->id. After register_pernet_device you + * will have the id of your private pointer. + * 2. set pernet_operations->size to have the code allocate and free + * a private structure pointed to from struct net. + * 3. do not change this pointer while the net is alive; + * 4. do not try to have any private reference on the net_generic object. + * + * After accomplishing all of the above, the private pointer can be + * accessed with the net_generic() call. + */ + +struct net_generic { + unsigned int len; + struct rcu_head rcu; + + void *ptr[0]; +}; + +static inline void *net_generic(const struct net *net, int id) +{ + struct net_generic *ng; + void *ptr; + + rcu_read_lock(); + ng = rcu_dereference(net->gen); + BUG_ON(id == 0 || id > ng->len); + ptr = ng->ptr[id - 1]; + rcu_read_unlock(); + + BUG_ON(!ptr); + return ptr; +} +#endif diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h new file mode 100644 index 00000000000..c06ac58ca10 --- /dev/null +++ b/include/net/netns/hash.h @@ -0,0 +1,21 @@ +#ifndef __NET_NS_HASH_H__ +#define __NET_NS_HASH_H__ + +#include <asm/cache.h> + +struct net; + +static inline unsigned int net_hash_mix(struct net *net) +{ +#ifdef CONFIG_NET_NS + /* + * shift this right to eliminate bits, that are + * always zeroed + */ + + return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT); +#else + return 0; +#endif +} +#endif diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h new file mode 100644 index 00000000000..e2070960bac --- /dev/null +++ b/include/net/netns/ieee802154_6lowpan.h @@ -0,0 +1,22 @@ +/* + * ieee802154 6lowpan in net namespaces + */ + +#include <net/inet_frag.h> + +#ifndef __NETNS_IEEE802154_6LOWPAN_H__ +#define __NETNS_IEEE802154_6LOWPAN_H__ + +struct netns_sysctl_lowpan { +#ifdef CONFIG_SYSCTL + struct ctl_table_header *frags_hdr; +#endif +}; + +struct netns_ieee802154_lowpan { + struct netns_sysctl_lowpan sysctl; + struct netns_frags frags; + int max_dsize; +}; + +#endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index a9b4f608629..aec5e12f9f1 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -5,33 +5,97 @@ #ifndef __NETNS_IPV4_H__ #define __NETNS_IPV4_H__ +#include <linux/uidgid.h> #include <net/inet_frag.h> +struct tcpm_hash_bucket; struct ctl_table_header; struct ipv4_devconf; struct fib_rules_ops; struct hlist_head; +struct fib_table; struct sock; +struct local_ports { + seqlock_t lock; + int range[2]; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; struct 
netns_ipv4 { #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; #endif struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rules_ops *rules_ops; + bool fib_has_custom_rules; + struct fib_table *fib_local; + struct fib_table *fib_main; + struct fib_table *fib_default; +#endif +#ifdef CONFIG_IP_ROUTE_CLASSID + int fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; struct sock *fibnl; + struct sock **icmp_sk; + struct inet_peer_base *peers; + struct tcpm_hash_bucket *tcp_metrics_hash; + unsigned int tcp_metrics_hash_log; struct netns_frags frags; #ifdef CONFIG_NETFILTER struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; +#ifdef CONFIG_SECURITY + struct xt_table *iptable_security; +#endif + struct xt_table *nat_table; +#endif + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_errors_use_inbound_ifaddr; + + struct local_ports ip_local_ports; + + int sysctl_tcp_ecn; + int sysctl_ip_no_pmtu_disc; + int sysctl_ip_fwd_use_pmtu; + + int sysctl_fwmark_reflect; + int sysctl_tcp_fwmark_accept; + + struct ping_group_range ping_group_range; + + atomic_t dev_addr_genid; + +#ifdef CONFIG_SYSCTL + unsigned long *sysctl_local_reserved_ports; +#endif + +#ifdef CONFIG_IP_MROUTE +#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES + struct mr_table *mrt; +#else + struct list_head mr_tables; + struct fib_rules_ops *mr_rules_ops; +#endif #endif + atomic_t rt_genid; }; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 1dd7de4e419..19d3446e59d 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -6,13 +6,17 @@ #ifndef __NETNS_IPV6_H__ #define __NETNS_IPV6_H__ +#include <net/dst_ops.h> struct ctl_table_header; struct netns_sysctl_ipv6 { #ifdef CONFIG_SYSCTL - struct ctl_table_header *table; + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; #endif int bindv6only; int flush_delay; @@ -23,18 +27,62 @@ struct netns_sysctl_ipv6 { int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; + int flowlabel_consistency; int icmpv6_time; + int anycast_src_echo_reply; + int fwmark_reflect; }; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; struct netns_frags frags; #ifdef CONFIG_NETFILTER struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; +#ifdef CONFIG_SECURITY + struct xt_table *ip6table_security; #endif + struct xt_table *ip6table_nat; +#endif + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct dst_ops ip6_dst_ops; + unsigned int ip6_rt_gc_expire; + unsigned long ip6_rt_last_gc; +#ifdef CONFIG_IPV6_MULTIPLE_TABLES + struct rt6_info *ip6_prohibit_entry; + struct rt6_info *ip6_blk_hole_entry; + struct fib6_table *fib6_local_tbl; + struct fib_rules_ops *fib6_rules_ops; +#endif + struct sock **icmp_sk; + struct 
sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; +#ifdef CONFIG_IPV6_MROUTE +#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + struct mr6_table *mrt6; +#else + struct list_head mr6_tables; + struct fib_rules_ops *mr6_rules_ops; +#endif +#endif + atomic_t dev_addr_genid; + atomic_t rt_genid; +}; + +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) +struct netns_nf_frag { + struct netns_sysctl_ipv6 sysctl; + struct netns_frags frags; }; #endif + +#endif diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h new file mode 100644 index 00000000000..d542a4b28cc --- /dev/null +++ b/include/net/netns/mib.h @@ -0,0 +1,28 @@ +#ifndef __NETNS_MIB_H__ +#define __NETNS_MIB_H__ + +#include <net/snmp.h> + +struct netns_mib { + DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); + DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics); + DEFINE_SNMP_STAT(struct linux_mib, net_statistics); + DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); + DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); + DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); + DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics); + +#if IS_ENABLED(CONFIG_IPV6) + struct proc_dir_entry *proc_net_devsnmp6; + DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6); + DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); + DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); + DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); + DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics); +#endif +#ifdef CONFIG_XFRM_STATISTICS + DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); +#endif +}; + +#endif diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h new file mode 100644 index 00000000000..88740024ccf --- /dev/null +++ b/include/net/netns/netfilter.h @@ -0,0 +1,18 @@ +#ifndef __NETNS_NETFILTER_H +#define __NETNS_NETFILTER_H + +#include <linux/proc_fs.h> +#include <linux/netfilter.h> + +struct nf_logger; + +struct netns_nf { +#if defined CONFIG_PROC_FS + struct proc_dir_entry *proc_netfilter; +#endif + const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; +#ifdef CONFIG_SYSCTL + struct ctl_table_header *nf_log_dir_header; +#endif +}; +#endif diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h new file mode 100644 index 00000000000..eee608b12cc --- /dev/null +++ b/include/net/netns/nftables.h @@ -0,0 +1,20 @@ +#ifndef _NETNS_NFTABLES_H_ +#define _NETNS_NFTABLES_H_ + +#include <linux/list.h> + +struct nft_af_info; + +struct netns_nftables { + struct list_head af_info; + struct list_head commit_list; + struct nft_af_info *ipv4; + struct nft_af_info *ipv6; + struct nft_af_info *inet; + struct nft_af_info *arp; + struct nft_af_info *bridge; + unsigned int base_seq; + u8 gencursor; +}; + +#endif diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h index 637daf69888..17ec2b95c06 100644 --- a/include/net/netns/packet.h +++ b/include/net/netns/packet.h @@ -4,11 +4,11 @@ #ifndef __NETNS_PACKET_H__ #define __NETNS_PACKET_H__ -#include <linux/list.h> -#include <linux/spinlock.h> +#include <linux/rculist.h> +#include <linux/mutex.h> struct netns_packet { - rwlock_t sklist_lock; + struct mutex sklist_lock; struct hlist_head sklist; }; diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h new file mode 100644 index 00000000000..3573a81815a --- /dev/null +++ b/include/net/netns/sctp.h @@ -0,0 +1,134 @@ +#ifndef __NETNS_SCTP_H__ +#define __NETNS_SCTP_H__ + +struct sock; +struct proc_dir_entry; +struct sctp_mib; +struct ctl_table_header; + +struct 
netns_sctp { + DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics); + +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_net_sctp; +#endif +#ifdef CONFIG_SYSCTL + struct ctl_table_header *sysctl_header; +#endif + /* This is the global socket data structure used for responding to + * the Out-of-the-blue (OOTB) packets. A control sock will be created + * for this socket at the initialization time. + */ + struct sock *ctl_sock; + + /* This is the global local address list. + * We actively maintain this complete list of addresses on + * the system by catching address add/delete events. + * + * It is a list of sctp_sockaddr_entry. + */ + struct list_head local_addr_list; + struct list_head addr_waitq; + struct timer_list addr_wq_timer; + struct list_head auto_asconf_splist; + spinlock_t addr_wq_lock; + + /* Lock that protects the local_addr_list writers */ + spinlock_t local_addr_lock; + + /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values + * + * The following protocol parameters are RECOMMENDED: + * + * RTO.Initial - 3 seconds + * RTO.Min - 1 second + * RTO.Max - 60 seconds + * RTO.Alpha - 1/8 (3 when converted to right shifts.) + * RTO.Beta - 1/4 (2 when converted to right shifts.) + */ + unsigned int rto_initial; + unsigned int rto_min; + unsigned int rto_max; + + /* Note: rto_alpha and rto_beta are really defined as inverse + * powers of two to facilitate integer operations. + */ + int rto_alpha; + int rto_beta; + + /* Max.Burst - 4 */ + int max_burst; + + /* Whether Cookie Preservative is enabled(1) or not(0) */ + int cookie_preserve_enable; + + /* The namespace default hmac alg */ + char *sctp_hmac_alg; + + /* Valid.Cookie.Life - 60 seconds */ + unsigned int valid_cookie_life; + + /* Delayed SACK timeout 200ms default*/ + unsigned int sack_timeout; + + /* HB.interval - 30 seconds */ + unsigned int hb_interval; + + /* Association.Max.Retrans - 10 attempts + * Path.Max.Retrans - 5 attempts (per destination address) + * Max.Init.Retransmits - 8 attempts + */ + int max_retrans_association; + int max_retrans_path; + int max_retrans_init; + /* Potentially-Failed.Max.Retrans sysctl value + * taken from: + * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05 + */ + int pf_retrans; + + /* + * Policy for preforming sctp/socket accounting + * 0 - do socket level accounting, all assocs share sk_sndbuf + * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes + */ + int sndbuf_policy; + + /* + * Policy for preforming sctp/socket accounting + * 0 - do socket level accounting, all assocs share sk_rcvbuf + * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes + */ + int rcvbuf_policy; + + int default_auto_asconf; + + /* Flag to indicate if addip is enabled. */ + int addip_enable; + int addip_noauth; + + /* Flag to indicate if PR-SCTP is enabled. */ + int prsctp_enable; + + /* Flag to idicate if SCTP-AUTH is enabled */ + int auth_enable; + + /* + * Policy to control SCTP IPv4 address scoping + * 0 - Disable IPv4 address scoping + * 1 - Enable IPv4 address scoping + * 2 - Selectively allow only IPv4 private addresses + * 3 - Selectively allow only IPv4 link local address + */ + int scope_policy; + + /* Threshold for rwnd update SACKS. Receive buffer shifted this many + * bits is an indicator of when to send and window update SACK. + */ + int rwnd_upd_shift; + + /* Threshold for autoclose timeout, in seconds. 
*/ + unsigned long max_autoclose; +}; + +#endif /* __NETNS_SCTP_H__ */ diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h index 0cb63ed2c1f..02fe40f8c8f 100644 --- a/include/net/netns/x_tables.h +++ b/include/net/netns/x_tables.h @@ -2,9 +2,24 @@ #define __NETNS_X_TABLES_H #include <linux/list.h> -#include <linux/net.h> +#include <linux/netfilter.h> + +struct ebt_table; struct netns_xt { - struct list_head tables[NPROTO]; + struct list_head tables[NFPROTO_NUMPROTO]; + bool notrack_deprecated_warning; +#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \ + defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE) + struct ebt_table *broute_table; + struct ebt_table *frame_filter; + struct ebt_table *frame_nat; +#endif +#if IS_ENABLED(CONFIG_IP_NF_TARGET_ULOG) + bool ulog_warn_deprecated; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_EBT_ULOG) + bool ebt_ulog_warn_deprecated; +#endif }; #endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h new file mode 100644 index 00000000000..3492434baf8 --- /dev/null +++ b/include/net/netns/xfrm.h @@ -0,0 +1,75 @@ +#ifndef __NETNS_XFRM_H +#define __NETNS_XFRM_H + +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/workqueue.h> +#include <linux/xfrm.h> +#include <net/dst_ops.h> +#include <net/flowcache.h> + +struct ctl_table_header; + +struct xfrm_policy_hash { + struct hlist_head *table; + unsigned int hmask; +}; + +struct netns_xfrm { + struct list_head state_all; + /* + * Hash table to find appropriate SA towards given target (endpoint of + * tunnel or destination of transport mode) allowed by selector. + * + * Main use is finding SA after policy selected tunnel or transport + * mode. Also, it can be used by ah/esp icmp error handler to find + * offending SA. + */ + struct hlist_head *state_bydst; + struct hlist_head *state_bysrc; + struct hlist_head *state_byspi; + unsigned int state_hmask; + unsigned int state_num; + struct work_struct state_hash_work; + struct hlist_head state_gc_list; + struct work_struct state_gc_work; + + struct list_head policy_all; + struct hlist_head *policy_byidx; + unsigned int policy_idx_hmask; + struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2]; + struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; + unsigned int policy_count[XFRM_POLICY_MAX * 2]; + struct work_struct policy_hash_work; + + + struct sock *nlsk; + struct sock *nlsk_stash; + + u32 sysctl_aevent_etime; + u32 sysctl_aevent_rseqth; + int sysctl_larval_drop; + u32 sysctl_acq_expires; +#ifdef CONFIG_SYSCTL + struct ctl_table_header *sysctl_hdr; +#endif + + struct dst_ops xfrm4_dst_ops; +#if IS_ENABLED(CONFIG_IPV6) + struct dst_ops xfrm6_dst_ops; +#endif + spinlock_t xfrm_state_lock; + rwlock_t xfrm_policy_lock; + struct mutex xfrm_cfg_mutex; + + /* flow cache part */ + struct flow_cache flow_cache_global; + atomic_t flow_cache_genid; + struct list_head flow_cache_gc_list; + spinlock_t flow_cache_gc_lock; + struct work_struct flow_cache_gc_work; + struct work_struct flow_cache_flush_work; + struct mutex flow_flush_sem; +}; + +#endif diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h new file mode 100644 index 00000000000..f2a9597ff53 --- /dev/null +++ b/include/net/netprio_cgroup.h @@ -0,0 +1,50 @@ +/* + * netprio_cgroup.h Control Group Priority set + * + * + * Authors: Neil Horman <nhorman@tuxdriver.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the 
License, or (at your option) + * any later version. + * + */ + +#ifndef _NETPRIO_CGROUP_H +#define _NETPRIO_CGROUP_H + +#include <linux/cgroup.h> +#include <linux/hardirq.h> +#include <linux/rcupdate.h> + +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) +struct netprio_map { + struct rcu_head rcu; + u32 priomap_len; + u32 priomap[]; +}; + +void sock_update_netprioidx(struct sock *sk); + +static inline u32 task_netprioidx(struct task_struct *p) +{ + struct cgroup_subsys_state *css; + u32 idx; + + rcu_read_lock(); + css = task_css(p, net_prio_cgrp_id); + idx = css->cgroup->id; + rcu_read_unlock(); + return idx; +} +#else /* !CONFIG_CGROUP_NET_PRIO */ +static inline u32 task_netprioidx(struct task_struct *p) +{ + return 0; +} + +#define sock_update_netprioidx(sk) + +#endif /* CONFIG_CGROUP_NET_PRIO */ +#endif /* _NET_CLS_CGROUP_H */ diff --git a/include/net/netrom.h b/include/net/netrom.h index f06852bba62..110350aca3d 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h @@ -9,6 +9,7 @@ #include <linux/netrom.h> #include <linux/list.h> +#include <linux/slab.h> #include <net/sock.h> #define NR_NETWORK_LEN 15 @@ -59,10 +60,6 @@ enum { #define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */ #define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */ -struct nr_private { - struct net_device_stats stats; -}; - struct nr_sock { struct sock sock; ax25_address user_addr, source_addr, dest_addr; @@ -136,6 +133,8 @@ static __inline__ void nr_node_put(struct nr_node *nr_node) static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh) { if (atomic_dec_and_test(&nr_neigh->refcount)) { + if (nr_neigh->ax25) + ax25_cb_put(nr_neigh->ax25); kfree(nr_neigh->digipeat); kfree(nr_neigh); } @@ -155,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node) nr_node_put(nr_node); } -#define nr_neigh_for_each(__nr_neigh, node, list) \ - hlist_for_each_entry(__nr_neigh, node, list, neigh_node) +#define nr_neigh_for_each(__nr_neigh, list) \ + hlist_for_each_entry(__nr_neigh, list, neigh_node) -#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \ - hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node) +#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \ + hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node) -#define nr_node_for_each(__nr_node, node, list) \ - hlist_for_each_entry(__nr_node, node, list, node_node) +#define nr_node_for_each(__nr_node, list) \ + hlist_for_each_entry(__nr_node, list, node_node) -#define nr_node_for_each_safe(__nr_node, node, node2, list) \ - hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node) +#define nr_node_for_each_safe(__nr_node, node2, list) \ + hlist_for_each_entry_safe(__nr_node, node2, list, node_node) /*********************************************************************/ @@ -184,51 +183,50 @@ extern int sysctl_netrom_routing_control; extern int sysctl_netrom_link_fails_count; extern int sysctl_netrom_reset_circuit; -extern int nr_rx_frame(struct sk_buff *, struct net_device *); -extern void nr_destroy_socket(struct sock *); +int nr_rx_frame(struct sk_buff *, struct net_device *); +void nr_destroy_socket(struct sock *); /* nr_dev.c */ -extern int nr_rx_ip(struct sk_buff *, struct net_device *); -extern void nr_setup(struct net_device *); +int nr_rx_ip(struct sk_buff *, struct net_device *); +void nr_setup(struct net_device *); /* nr_in.c */ -extern int nr_process_rx_frame(struct sock *, struct sk_buff *); +int nr_process_rx_frame(struct sock *, struct sk_buff *); /* nr_loopback.c */ 
-extern void nr_loopback_init(void); -extern void nr_loopback_clear(void); -extern int nr_loopback_queue(struct sk_buff *); +void nr_loopback_init(void); +void nr_loopback_clear(void); +int nr_loopback_queue(struct sk_buff *); /* nr_out.c */ -extern void nr_output(struct sock *, struct sk_buff *); -extern void nr_send_nak_frame(struct sock *); -extern void nr_kick(struct sock *); -extern void nr_transmit_buffer(struct sock *, struct sk_buff *); -extern void nr_establish_data_link(struct sock *); -extern void nr_enquiry_response(struct sock *); -extern void nr_check_iframes_acked(struct sock *, unsigned short); +void nr_output(struct sock *, struct sk_buff *); +void nr_send_nak_frame(struct sock *); +void nr_kick(struct sock *); +void nr_transmit_buffer(struct sock *, struct sk_buff *); +void nr_establish_data_link(struct sock *); +void nr_enquiry_response(struct sock *); +void nr_check_iframes_acked(struct sock *, unsigned short); /* nr_route.c */ -extern void nr_rt_device_down(struct net_device *); -extern struct net_device *nr_dev_first(void); -extern struct net_device *nr_dev_get(ax25_address *); -extern int nr_rt_ioctl(unsigned int, void __user *); -extern void nr_link_failed(ax25_cb *, int); -extern int nr_route_frame(struct sk_buff *, ax25_cb *); +void nr_rt_device_down(struct net_device *); +struct net_device *nr_dev_first(void); +struct net_device *nr_dev_get(ax25_address *); +int nr_rt_ioctl(unsigned int, void __user *); +void nr_link_failed(ax25_cb *, int); +int nr_route_frame(struct sk_buff *, ax25_cb *); extern const struct file_operations nr_nodes_fops; extern const struct file_operations nr_neigh_fops; -extern void nr_rt_free(void); +void nr_rt_free(void); /* nr_subr.c */ -extern void nr_clear_queues(struct sock *); -extern void nr_frames_acked(struct sock *, unsigned short); -extern void nr_requeue_frames(struct sock *); -extern int nr_validate_nr(struct sock *, unsigned short); -extern int nr_in_rx_window(struct sock *, unsigned short); -extern void nr_write_internal(struct sock *, int); +void nr_clear_queues(struct sock *); +void nr_frames_acked(struct sock *, unsigned short); +void nr_requeue_frames(struct sock *); +int nr_validate_nr(struct sock *, unsigned short); +int nr_in_rx_window(struct sock *, unsigned short); +void nr_write_internal(struct sock *, int); -extern void __nr_transmit_reply(struct sk_buff *skb, int mine, - unsigned char cmdflags); +void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags); /* * This routine is called when a Connect Acknowledge with the Choke Flag @@ -248,24 +246,24 @@ do { \ __nr_transmit_reply((skb), (mine), NR_RESET); \ } while (0) -extern void nr_disconnect(struct sock *, int); +void nr_disconnect(struct sock *, int); /* nr_timer.c */ -extern void nr_init_timers(struct sock *sk); -extern void nr_start_heartbeat(struct sock *); -extern void nr_start_t1timer(struct sock *); -extern void nr_start_t2timer(struct sock *); -extern void nr_start_t4timer(struct sock *); -extern void nr_start_idletimer(struct sock *); -extern void nr_stop_heartbeat(struct sock *); -extern void nr_stop_t1timer(struct sock *); -extern void nr_stop_t2timer(struct sock *); -extern void nr_stop_t4timer(struct sock *); -extern void nr_stop_idletimer(struct sock *); -extern int nr_t1timer_running(struct sock *); +void nr_init_timers(struct sock *sk); +void nr_start_heartbeat(struct sock *); +void nr_start_t1timer(struct sock *); +void nr_start_t2timer(struct sock *); +void nr_start_t4timer(struct sock *); +void nr_start_idletimer(struct sock 
*); +void nr_stop_heartbeat(struct sock *); +void nr_stop_t1timer(struct sock *); +void nr_stop_t2timer(struct sock *); +void nr_stop_t4timer(struct sock *); +void nr_stop_idletimer(struct sock *); +int nr_t1timer_running(struct sock *); /* sysctl_net_netrom.c */ -extern void nr_register_sysctl(void); -extern void nr_unregister_sysctl(void); +void nr_register_sysctl(void); +void nr_unregister_sysctl(void); #endif diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h new file mode 100644 index 00000000000..bdf55c3b7a1 --- /dev/null +++ b/include/net/nfc/digital.h @@ -0,0 +1,248 @@ +/* + * NFC Digital Protocol stack + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef __NFC_DIGITAL_H +#define __NFC_DIGITAL_H + +#include <linux/skbuff.h> +#include <net/nfc/nfc.h> + +/** + * Configuration types for in_configure_hw and tg_configure_hw. + */ +enum { + NFC_DIGITAL_CONFIG_RF_TECH = 0, + NFC_DIGITAL_CONFIG_FRAMING, +}; + +/** + * RF technology values passed as param argument to in_configure_hw and + * tg_configure_hw for NFC_DIGITAL_CONFIG_RF_TECH configuration type. + */ +enum { + NFC_DIGITAL_RF_TECH_106A = 0, + NFC_DIGITAL_RF_TECH_212F, + NFC_DIGITAL_RF_TECH_424F, + NFC_DIGITAL_RF_TECH_ISO15693, + NFC_DIGITAL_RF_TECH_106B, + + NFC_DIGITAL_RF_TECH_LAST, +}; + +/** + * Framing configuration passed as param argument to in_configure_hw and + * tg_configure_hw for NFC_DIGITAL_CONFIG_FRAMING configuration type. + */ +enum { + NFC_DIGITAL_FRAMING_NFCA_SHORT = 0, + NFC_DIGITAL_FRAMING_NFCA_STANDARD, + NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A, + + NFC_DIGITAL_FRAMING_NFCA_T1T, + NFC_DIGITAL_FRAMING_NFCA_T2T, + NFC_DIGITAL_FRAMING_NFCA_T4T, + NFC_DIGITAL_FRAMING_NFCA_NFC_DEP, + + NFC_DIGITAL_FRAMING_NFCF, + NFC_DIGITAL_FRAMING_NFCF_T3T, + NFC_DIGITAL_FRAMING_NFCF_NFC_DEP, + NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED, + + NFC_DIGITAL_FRAMING_ISO15693_INVENTORY, + NFC_DIGITAL_FRAMING_ISO15693_T5T, + + NFC_DIGITAL_FRAMING_NFCB, + NFC_DIGITAL_FRAMING_NFCB_T4T, + + NFC_DIGITAL_FRAMING_LAST, +}; + +#define DIGITAL_MDAA_NFCID1_SIZE 3 + +struct digital_tg_mdaa_params { + u16 sens_res; + u8 nfcid1[DIGITAL_MDAA_NFCID1_SIZE]; + u8 sel_res; + + u8 nfcid2[NFC_NFCID2_MAXSIZE]; + u16 sc; +}; + +struct nfc_digital_dev; + +/** + * nfc_digital_cmd_complete_t - Definition of command result callback + * + * @ddev: nfc_digital_device ref + * @arg: user data + * @resp: response data + * + * resp pointer can be an error code and will be checked with IS_ERR() macro. + * The callback is responsible for freeing resp sk_buff. + */ +typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev, + void *arg, struct sk_buff *resp); + +/** + * Device side NFC Digital operations + * + * Initiator mode: + * @in_configure_hw: Hardware configuration for RF technology and communication + * framing in initiator mode. This is a synchronous function. + * @in_send_cmd: Initiator mode data exchange using RF technology and framing + * previously set with in_configure_hw. The peer response is returned + * through callback cb. 
If an I/O error occurs or the peer didn't reply + * within the specified timeout (ms), the error code is passed back through + * the resp pointer. This is an asynchronous function. + * + * Target mode: Only NFC-DEP protocol is supported in target mode. + * @tg_configure_hw: Hardware configuration for RF technology and communication + * framing in target mode. This is a synchronous function. + * @tg_send_cmd: Target mode data exchange using RF technology and framing + * previously set with tg_configure_hw. The peer's next command is returned + * through callback cb. If an I/O error occurs or the peer didn't reply + * within the specified timeout (ms), the error code is passed back through + * the resp pointer. This is an asynchronous function. + * @tg_listen: Put the device in listen mode waiting for data from the peer + * device. This is an asynchronous function. + * @tg_listen_mdaa: If supported, put the device in automatic listen mode with + * mode detection and automatic anti-collision. In this mode, the device + * automatically detects the RF technology and executes the anti-collision + * detection using the command responses specified in mdaa_params. The + * mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for 106A RF + * tech, and NFCID2 and system code (sc) for 212F and 424F. The driver returns + * the NFC-DEP ATR_REQ command through cb. The digital stack deduces the RF + * tech by analyzing the SoD of the frame containing the ATR_REQ command. + * This is an asynchronous function. + * + * @switch_rf: Turns the device radio on or off. The stack does not explicitly + * call switch_rf to turn the radio on. A call to in|tg_configure_hw must turn + * the device radio on. + * @abort_cmd: Discard the last sent command. + * + * Notes: Asynchronous functions have a timeout parameter. It is the driver's + * responsibility to call the digital stack back through the + * nfc_digital_cmd_complete_t callback when no RF response has been + * received within the specified time (in milliseconds). In that case the + * driver must set the resp sk_buff to ERR_PTR(-ETIMEDOUT). + * Since the digital stack serializes commands to be sent, it's mandatory + * for the driver to handle the timeout correctly. Otherwise the stack + * would not be able to send new commands while waiting for the reply of the + * current one.
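+ *
+ * A minimal illustrative sketch (not part of this header) of the timeout
+ * rule above: a driver would keep the cb and arg it was handed by
+ * in_send_cmd or tg_send_cmd in a private structure (a hypothetical
+ * struct my_digital_drv here) and, when its hardware timer expires with
+ * no RF response, complete the command with an error pointer:
+ *
+ *	static void my_digital_cmd_timeout(struct my_digital_drv *drv)
+ *	{
+ *		drv->cb(drv->ddev, drv->arg, ERR_PTR(-ETIMEDOUT));
+ *	}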
+ */ +struct nfc_digital_ops { + int (*in_configure_hw)(struct nfc_digital_dev *ddev, int type, + int param); + int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb, + u16 timeout, nfc_digital_cmd_complete_t cb, + void *arg); + + int (*tg_configure_hw)(struct nfc_digital_dev *ddev, int type, + int param); + int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb, + u16 timeout, nfc_digital_cmd_complete_t cb, + void *arg); + int (*tg_listen)(struct nfc_digital_dev *ddev, u16 timeout, + nfc_digital_cmd_complete_t cb, void *arg); + int (*tg_listen_mdaa)(struct nfc_digital_dev *ddev, + struct digital_tg_mdaa_params *mdaa_params, + u16 timeout, nfc_digital_cmd_complete_t cb, + void *arg); + + int (*switch_rf)(struct nfc_digital_dev *ddev, bool on); + void (*abort_cmd)(struct nfc_digital_dev *ddev); +}; + +#define NFC_DIGITAL_POLL_MODE_COUNT_MAX 6 /* 106A, 212F, and 424F in & tg */ + +typedef int (*digital_poll_t)(struct nfc_digital_dev *ddev, u8 rf_tech); + +struct digital_poll_tech { + u8 rf_tech; + digital_poll_t poll_func; +}; + +/** + * Driver capabilities - bit mask made of the following values + * + * @NFC_DIGITAL_DRV_CAPS_IN_CRC: The driver handles CRC calculation in initiator + * mode. + * @NFC_DIGITAL_DRV_CAPS_TG_CRC: The driver handles CRC calculation in target + * mode. + */ +#define NFC_DIGITAL_DRV_CAPS_IN_CRC 0x0001 +#define NFC_DIGITAL_DRV_CAPS_TG_CRC 0x0002 + +struct nfc_digital_dev { + struct nfc_dev *nfc_dev; + struct nfc_digital_ops *ops; + + u32 protocols; + + int tx_headroom; + int tx_tailroom; + + u32 driver_capabilities; + void *driver_data; + + struct digital_poll_tech poll_techs[NFC_DIGITAL_POLL_MODE_COUNT_MAX]; + u8 poll_tech_count; + u8 poll_tech_index; + struct mutex poll_lock; + + struct work_struct cmd_work; + struct work_struct cmd_complete_work; + struct list_head cmd_queue; + struct mutex cmd_lock; + + struct work_struct poll_work; + + u8 curr_protocol; + u8 curr_rf_tech; + u8 curr_nfc_dep_pni; + + u16 target_fsc; + + int (*skb_check_crc)(struct sk_buff *skb); + void (*skb_add_crc)(struct sk_buff *skb); +}; + +struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops, + __u32 supported_protocols, + __u32 driver_capabilities, + int tx_headroom, + int tx_tailroom); +void nfc_digital_free_device(struct nfc_digital_dev *ndev); +int nfc_digital_register_device(struct nfc_digital_dev *ndev); +void nfc_digital_unregister_device(struct nfc_digital_dev *ndev); + +static inline void nfc_digital_set_parent_dev(struct nfc_digital_dev *ndev, + struct device *dev) +{ + nfc_set_parent_dev(ndev->nfc_dev, dev); +} + +static inline void nfc_digital_set_drvdata(struct nfc_digital_dev *dev, + void *data) +{ + dev->driver_data = data; +} + +static inline void *nfc_digital_get_drvdata(struct nfc_digital_dev *dev) +{ + return dev->driver_data; +} + +#endif /* __NFC_DIGITAL_H */ diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h new file mode 100644 index 00000000000..61286db5438 --- /dev/null +++ b/include/net/nfc/hci.h @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __NET_HCI_H +#define __NET_HCI_H + +#include <linux/skbuff.h> + +#include <net/nfc/nfc.h> + +struct nfc_hci_dev; + +struct nfc_hci_ops { + int (*open) (struct nfc_hci_dev *hdev); + void (*close) (struct nfc_hci_dev *hdev); + int (*load_session) (struct nfc_hci_dev *hdev); + int (*hci_ready) (struct nfc_hci_dev *hdev); + /* + * xmit must always send the complete buffer before + * returning. Returned result must be 0 for success + * or negative for failure. + */ + int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb); + int (*start_poll) (struct nfc_hci_dev *hdev, + u32 im_protocols, u32 tm_protocols); + int (*dep_link_up)(struct nfc_hci_dev *hdev, struct nfc_target *target, + u8 comm_mode, u8 *gb, size_t gb_len); + int (*dep_link_down)(struct nfc_hci_dev *hdev); + int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); + int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); + int (*im_transceive) (struct nfc_hci_dev *hdev, + struct nfc_target *target, struct sk_buff *skb, + data_exchange_cb_t cb, void *cb_context); + int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb); + int (*check_presence)(struct nfc_hci_dev *hdev, + struct nfc_target *target); + int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event, + struct sk_buff *skb); + int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name); + int (*discover_se)(struct nfc_hci_dev *dev); + int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); + int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); +}; + +/* Pipes */ +#define NFC_HCI_INVALID_PIPE 0x80 +#define NFC_HCI_LINK_MGMT_PIPE 0x00 +#define NFC_HCI_ADMIN_PIPE 0x01 + +struct nfc_hci_gate { + u8 gate; + u8 pipe; +}; + +#define NFC_HCI_MAX_CUSTOM_GATES 50 +struct nfc_hci_init_data { + u8 gate_count; + struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; + char session_id[9]; +}; + +typedef int (*xmit) (struct sk_buff *skb, void *cb_data); + +#define NFC_HCI_MAX_GATES 256 + +/* + * These values can be specified by a driver to indicate it requires some + * adaptation of the HCI standard. 
+ * + * NFC_HCI_QUIRK_SHORT_CLEAR - send HCI_ADM_CLEAR_ALL_PIPE cmd with no params + */ +enum { + NFC_HCI_QUIRK_SHORT_CLEAR = 0, +}; + +struct nfc_hci_dev { + struct nfc_dev *ndev; + + u32 max_data_link_payload; + + bool shutting_down; + + struct mutex msg_tx_mutex; + + struct list_head msg_tx_queue; + + struct work_struct msg_tx_work; + + struct timer_list cmd_timer; + struct hci_msg *cmd_pending_msg; + + struct sk_buff_head rx_hcp_frags; + + struct work_struct msg_rx_work; + + struct sk_buff_head msg_rx_queue; + + struct nfc_hci_ops *ops; + + struct nfc_llc *llc; + + struct nfc_hci_init_data init_data; + + void *clientdata; + + u8 gate2pipe[NFC_HCI_MAX_GATES]; + + u8 sw_romlib; + u8 sw_patch; + u8 sw_flashlib_major; + u8 sw_flashlib_minor; + + u8 hw_derivative; + u8 hw_version; + u8 hw_mpw; + u8 hw_software; + u8 hw_bsid; + + int async_cb_type; + data_exchange_cb_t async_cb; + void *async_cb_context; + + u8 *gb; + size_t gb_len; + + unsigned long quirks; +}; + +/* hci device allocation */ +struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, + struct nfc_hci_init_data *init_data, + unsigned long quirks, + u32 protocols, + const char *llc_name, + int tx_headroom, + int tx_tailroom, + int max_link_payload); +void nfc_hci_free_device(struct nfc_hci_dev *hdev); + +int nfc_hci_register_device(struct nfc_hci_dev *hdev); +void nfc_hci_unregister_device(struct nfc_hci_dev *hdev); + +void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata); +void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev); + +void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err); + +int nfc_hci_result_to_errno(u8 result); + +/* Host IDs */ +#define NFC_HCI_HOST_CONTROLLER_ID 0x00 +#define NFC_HCI_TERMINAL_HOST_ID 0x01 +#define NFC_HCI_UICC_HOST_ID 0x02 + +/* Host Controller Gates and registry indexes */ +#define NFC_HCI_ADMIN_GATE 0x00 +#define NFC_HCI_ADMIN_SESSION_IDENTITY 0x01 +#define NFC_HCI_ADMIN_MAX_PIPE 0x02 +#define NFC_HCI_ADMIN_WHITELIST 0x03 +#define NFC_HCI_ADMIN_HOST_LIST 0x04 + +#define NFC_HCI_LOOPBACK_GATE 0x04 + +#define NFC_HCI_ID_MGMT_GATE 0x05 +#define NFC_HCI_ID_MGMT_VERSION_SW 0x01 +#define NFC_HCI_ID_MGMT_VERSION_HW 0x03 +#define NFC_HCI_ID_MGMT_VENDOR_NAME 0x04 +#define NFC_HCI_ID_MGMT_MODEL_ID 0x05 +#define NFC_HCI_ID_MGMT_HCI_VERSION 0x02 +#define NFC_HCI_ID_MGMT_GATES_LIST 0x06 + +#define NFC_HCI_LINK_MGMT_GATE 0x06 +#define NFC_HCI_LINK_MGMT_REC_ERROR 0x01 + +#define NFC_HCI_RF_READER_B_GATE 0x11 +#define NFC_HCI_RF_READER_B_PUPI 0x03 +#define NFC_HCI_RF_READER_B_APPLICATION_DATA 0x04 +#define NFC_HCI_RF_READER_B_AFI 0x02 +#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE 0x01 +#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA 0x05 + +#define NFC_HCI_RF_READER_A_GATE 0x13 +#define NFC_HCI_RF_READER_A_UID 0x02 +#define NFC_HCI_RF_READER_A_ATQA 0x04 +#define NFC_HCI_RF_READER_A_APPLICATION_DATA 0x05 +#define NFC_HCI_RF_READER_A_SAK 0x03 +#define NFC_HCI_RF_READER_A_FWI_SFGT 0x06 +#define NFC_HCI_RF_READER_A_DATARATE_MAX 0x01 + +#define NFC_HCI_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5) +#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE 0 +#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443 1 +#define NFC_HCI_TYPE_A_SEL_PROT_DEP 2 +#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP 3 + +/* Generic events */ +#define NFC_HCI_EVT_HCI_END_OF_OPERATION 0x01 +#define NFC_HCI_EVT_POST_DATA 0x02 +#define NFC_HCI_EVT_HOT_PLUG 0x03 + +/* Reader RF gates events */ +#define NFC_HCI_EVT_READER_REQUESTED 0x10 +#define NFC_HCI_EVT_END_OPERATION 0x11 + +/* Reader Application gate events */ +#define 
NFC_HCI_EVT_TARGET_DISCOVERED 0x10 + +/* receiving messages from lower layer */ +void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb); +void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + struct sk_buff *skb); +void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, + struct sk_buff *skb); +void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb); + +/* connecting to gates and sending hci instructions */ +int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, + u8 pipe); +int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate); +int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev); +int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + struct sk_buff **skb); +int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + const u8 *param, size_t param_len); +int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, struct sk_buff **skb); +int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, + data_exchange_cb_t cb, void *cb_context); +int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, + const u8 *param, size_t param_len); +int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, + const u8 *param, size_t param_len); +int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate); +u32 nfc_hci_sak_to_protocol(u8 sak); + +#endif /* __NET_HCI_H */ diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h new file mode 100644 index 00000000000..c25fbdee0d6 --- /dev/null +++ b/include/net/nfc/llc.h @@ -0,0 +1,52 @@ +/* + * Link Layer Control manager public interface + * + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
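+ *
+ * Illustrative usage sketch (not part of this header): an HCI driver
+ * selects an LLC by name when allocating its HCI device, for instance the
+ * shdlc LLC named below. Identifiers prefixed with my_/MY_ are hypothetical:
+ *
+ *	hdev = nfc_hci_allocate_device(&my_hci_ops, &my_init_data, 0,
+ *				       my_protocols, LLC_SHDLC_NAME,
+ *				       MY_TX_HEADROOM, MY_TX_TAILROOM,
+ *				       MY_MAX_LINK_PAYLOAD);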
+ */ + +#ifndef __NFC_LLC_H_ +#define __NFC_LLC_H_ + +#include <net/nfc/hci.h> +#include <linux/skbuff.h> + +#define LLC_NOP_NAME "nop" +#define LLC_SHDLC_NAME "shdlc" + +typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb); +typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb); +typedef void (*llc_failure_t) (struct nfc_hci_dev *hdev, int err); + +struct nfc_llc; + +struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev, + xmit_to_drv_t xmit_to_drv, + rcv_to_hci_t rcv_to_hci, int tx_headroom, + int tx_tailroom, llc_failure_t llc_failure); +void nfc_llc_free(struct nfc_llc *llc); + +void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom, + int *rx_tailroom); + + +int nfc_llc_start(struct nfc_llc *llc); +int nfc_llc_stop(struct nfc_llc *llc); +void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb); +int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb); + +int nfc_llc_init(void); +void nfc_llc_exit(void); + +#endif /* __NFC_LLC_H_ */ diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h new file mode 100644 index 00000000000..fbfa4e471ab --- /dev/null +++ b/include/net/nfc/nci.h @@ -0,0 +1,396 @@ +/* + * The NFC Controller Interface is the communication protocol between an + * NFC Controller (NFCC) and a Device Host (DH). + * + * Copyright (C) 2011 Texas Instruments, Inc. + * + * Written by Ilan Elias <ilane@ti.com> + * + * Acknowledgements: + * This file is based on hci.h, which was written + * by Maxim Krasnyansky. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#ifndef __NCI_H +#define __NCI_H + +/* NCI constants */ +#define NCI_MAX_NUM_MAPPING_CONFIGS 10 +#define NCI_MAX_NUM_RF_CONFIGS 10 +#define NCI_MAX_NUM_CONN 10 +#define NCI_MAX_PARAM_LEN 251 + +/* NCI Status Codes */ +#define NCI_STATUS_OK 0x00 +#define NCI_STATUS_REJECTED 0x01 +#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02 +#define NCI_STATUS_FAILED 0x03 +#define NCI_STATUS_NOT_INITIALIZED 0x04 +#define NCI_STATUS_SYNTAX_ERROR 0x05 +#define NCI_STATUS_SEMANTIC_ERROR 0x06 +#define NCI_STATUS_UNKNOWN_GID 0x07 +#define NCI_STATUS_UNKNOWN_OID 0x08 +#define NCI_STATUS_INVALID_PARAM 0x09 +#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a +/* Discovery Specific Status Codes */ +#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 +#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 +#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2 +/* RF Interface Specific Status Codes */ +#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 +#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 +#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 +/* NFCEE Interface Specific Status Codes */ +#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0 +#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1 +#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2 +#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3 + +/* NCI RF Technology and Mode */ +#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00 +#define NCI_NFC_B_PASSIVE_POLL_MODE 0x01 +#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 +#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 +#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 +#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06 +#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 +#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 +#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 +#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 +#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 +#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86 + +/* NCI RF Technologies */ +#define NCI_NFC_RF_TECHNOLOGY_A 0x00 +#define NCI_NFC_RF_TECHNOLOGY_B 0x01 +#define NCI_NFC_RF_TECHNOLOGY_F 0x02 +#define NCI_NFC_RF_TECHNOLOGY_15693 0x03 + +/* NCI Bit Rates */ +#define NCI_NFC_BIT_RATE_106 0x00 +#define NCI_NFC_BIT_RATE_212 0x01 +#define NCI_NFC_BIT_RATE_424 0x02 +#define NCI_NFC_BIT_RATE_848 0x03 +#define NCI_NFC_BIT_RATE_1695 0x04 +#define NCI_NFC_BIT_RATE_3390 0x05 +#define NCI_NFC_BIT_RATE_6780 0x06 + +/* NCI RF Protocols */ +#define NCI_RF_PROTOCOL_UNKNOWN 0x00 +#define NCI_RF_PROTOCOL_T1T 0x01 +#define NCI_RF_PROTOCOL_T2T 0x02 +#define NCI_RF_PROTOCOL_T3T 0x03 +#define NCI_RF_PROTOCOL_ISO_DEP 0x04 +#define NCI_RF_PROTOCOL_NFC_DEP 0x05 + +/* NCI RF Interfaces */ +#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 +#define NCI_RF_INTERFACE_FRAME 0x01 +#define NCI_RF_INTERFACE_ISO_DEP 0x02 +#define NCI_RF_INTERFACE_NFC_DEP 0x03 + +/* NCI Configuration Parameter Tags */ +#define NCI_PN_ATR_REQ_GEN_BYTES 0x29 + +/* NCI Reset types */ +#define NCI_RESET_TYPE_KEEP_CONFIG 0x00 +#define NCI_RESET_TYPE_RESET_CONFIG 0x01 + +/* NCI Static RF connection ID */ +#define NCI_STATIC_RF_CONN_ID 0x00 + +/* NCI Data Flow Control */ +#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff + +/* NCI RF_DISCOVER_MAP_CMD modes */ +#define NCI_DISC_MAP_MODE_POLL 0x01 +#define NCI_DISC_MAP_MODE_LISTEN 0x02 + +/* NCI Discover Notification Type */ +#define NCI_DISCOVER_NTF_TYPE_LAST 0x00 +#define NCI_DISCOVER_NTF_TYPE_LAST_NFCC 0x01 +#define NCI_DISCOVER_NTF_TYPE_MORE 0x02 + +/* NCI Deactivation Type */ +#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 +#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 +#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 +#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03 + +/* Message Type 
(MT) */ +#define NCI_MT_DATA_PKT 0x00 +#define NCI_MT_CMD_PKT 0x01 +#define NCI_MT_RSP_PKT 0x02 +#define NCI_MT_NTF_PKT 0x03 + +#define nci_mt(hdr) (((hdr)[0]>>5)&0x07) +#define nci_mt_set(hdr, mt) ((hdr)[0] |= (__u8)(((mt)&0x07)<<5)) + +/* Packet Boundary Flag (PBF) */ +#define NCI_PBF_LAST 0x00 +#define NCI_PBF_CONT 0x01 + +#define nci_pbf(hdr) (__u8)(((hdr)[0]>>4)&0x01) +#define nci_pbf_set(hdr, pbf) ((hdr)[0] |= (__u8)(((pbf)&0x01)<<4)) + +/* Control Opcode manipulation */ +#define nci_opcode_pack(gid, oid) (__u16)((((__u16)((gid)&0x0f))<<8)|\ + ((__u16)((oid)&0x3f))) +#define nci_opcode(hdr) nci_opcode_pack(hdr[0], hdr[1]) +#define nci_opcode_gid(op) (__u8)(((op)&0x0f00)>>8) +#define nci_opcode_oid(op) (__u8)((op)&0x003f) + +/* Payload Length */ +#define nci_plen(hdr) (__u8)((hdr)[2]) + +/* Connection ID */ +#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f) + +/* GID values */ +#define NCI_GID_CORE 0x0 +#define NCI_GID_RF_MGMT 0x1 +#define NCI_GID_NFCEE_MGMT 0x2 +#define NCI_GID_PROPRIETARY 0xf + +/* ----- NCI over SPI head/crc(tail) room needed for outgoing frames ----- */ +#define NCI_SPI_HDR_LEN 4 +#define NCI_SPI_CRC_LEN 2 + +/* ---- NCI Packet structures ---- */ +#define NCI_CTRL_HDR_SIZE 3 +#define NCI_DATA_HDR_SIZE 3 + +struct nci_ctrl_hdr { + __u8 gid; /* MT & PBF & GID */ + __u8 oid; + __u8 plen; +} __packed; + +struct nci_data_hdr { + __u8 conn_id; /* MT & PBF & ConnID */ + __u8 rfu; + __u8 plen; +} __packed; + +/* ------------------------ */ +/* ----- NCI Commands ---- */ +/* ------------------------ */ +#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00) +struct nci_core_reset_cmd { + __u8 reset_type; +} __packed; + +#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01) + +#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02) +struct set_config_param { + __u8 id; + __u8 len; + __u8 val[NCI_MAX_PARAM_LEN]; +} __packed; + +struct nci_core_set_config_cmd { + __u8 num_params; + struct set_config_param param; /* support 1 param per cmd is enough */ +} __packed; + +#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) +struct disc_map_config { + __u8 rf_protocol; + __u8 mode; + __u8 rf_interface; +} __packed; + +struct nci_rf_disc_map_cmd { + __u8 num_mapping_configs; + struct disc_map_config mapping_configs + [NCI_MAX_NUM_MAPPING_CONFIGS]; +} __packed; + +#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) +struct disc_config { + __u8 rf_tech_and_mode; + __u8 frequency; +} __packed; + +struct nci_rf_disc_cmd { + __u8 num_disc_configs; + struct disc_config disc_configs[NCI_MAX_NUM_RF_CONFIGS]; +} __packed; + +#define NCI_OP_RF_DISCOVER_SELECT_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x04) +struct nci_rf_discover_select_cmd { + __u8 rf_discovery_id; + __u8 rf_protocol; + __u8 rf_interface; +} __packed; + +#define NCI_OP_RF_DEACTIVATE_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) +struct nci_rf_deactivate_cmd { + __u8 type; +} __packed; + +/* ----------------------- */ +/* ---- NCI Responses ---- */ +/* ----------------------- */ +#define NCI_OP_CORE_RESET_RSP nci_opcode_pack(NCI_GID_CORE, 0x00) +struct nci_core_reset_rsp { + __u8 status; + __u8 nci_ver; + __u8 config_status; +} __packed; + +#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01) +struct nci_core_init_rsp_1 { + __u8 status; + __le32 nfcc_features; + __u8 num_supported_rf_interfaces; + __u8 supported_rf_interfaces[0]; /* variable size array */ + /* continuted in nci_core_init_rsp_2 */ +} __packed; + +struct nci_core_init_rsp_2 { + __u8 
max_logical_connections; + __le16 max_routing_table_size; + __u8 max_ctrl_pkt_payload_len; + __le16 max_size_for_large_params; + __u8 manufact_id; + __le32 manufact_specific_info; +} __packed; + +#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02) +struct nci_core_set_config_rsp { + __u8 status; + __u8 num_params; + __u8 params_id[0]; /* variable size array */ +} __packed; + +#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) + +#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) + +#define NCI_OP_RF_DISCOVER_SELECT_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x04) + +#define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) + +/* --------------------------- */ +/* ---- NCI Notifications ---- */ +/* --------------------------- */ +#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06) +struct conn_credit_entry { + __u8 conn_id; + __u8 credits; +} __packed; + +struct nci_core_conn_credit_ntf { + __u8 num_entries; + struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN]; +} __packed; + +#define NCI_OP_CORE_GENERIC_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x07) + +#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08) +struct nci_core_intf_error_ntf { + __u8 status; + __u8 conn_id; +} __packed; + +#define NCI_OP_RF_DISCOVER_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) +struct rf_tech_specific_params_nfca_poll { + __u16 sens_res; + __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ + __u8 nfcid1[10]; + __u8 sel_res_len; /* 0 or 1 Bytes */ + __u8 sel_res; +} __packed; + +struct rf_tech_specific_params_nfcb_poll { + __u8 sensb_res_len; + __u8 sensb_res[12]; /* 11 or 12 Bytes */ +} __packed; + +struct rf_tech_specific_params_nfcf_poll { + __u8 bit_rate; + __u8 sensf_res_len; + __u8 sensf_res[18]; /* 16 or 18 Bytes */ +} __packed; + +struct nci_rf_discover_ntf { + __u8 rf_discovery_id; + __u8 rf_protocol; + __u8 rf_tech_and_mode; + __u8 rf_tech_specific_params_len; + + union { + struct rf_tech_specific_params_nfca_poll nfca_poll; + struct rf_tech_specific_params_nfcb_poll nfcb_poll; + struct rf_tech_specific_params_nfcf_poll nfcf_poll; + } rf_tech_specific_params; + + __u8 ntf_type; +} __packed; + +#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) +struct activation_params_nfca_poll_iso_dep { + __u8 rats_res_len; + __u8 rats_res[20]; +}; + +struct activation_params_nfcb_poll_iso_dep { + __u8 attrib_res_len; + __u8 attrib_res[50]; +}; + +struct activation_params_poll_nfc_dep { + __u8 atr_res_len; + __u8 atr_res[63]; +}; + +struct nci_rf_intf_activated_ntf { + __u8 rf_discovery_id; + __u8 rf_interface; + __u8 rf_protocol; + __u8 activation_rf_tech_and_mode; + __u8 max_data_pkt_payload_size; + __u8 initial_num_credits; + __u8 rf_tech_specific_params_len; + + union { + struct rf_tech_specific_params_nfca_poll nfca_poll; + struct rf_tech_specific_params_nfcb_poll nfcb_poll; + struct rf_tech_specific_params_nfcf_poll nfcf_poll; + } rf_tech_specific_params; + + __u8 data_exch_rf_tech_and_mode; + __u8 data_exch_tx_bit_rate; + __u8 data_exch_rx_bit_rate; + __u8 activation_params_len; + + union { + struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep; + struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep; + struct activation_params_poll_nfc_dep poll_nfc_dep; + } activation_params; + +} __packed; + +#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) +struct nci_rf_deactivate_ntf { + __u8 type; + __u8 reason; +} __packed; + +#endif /* __NCI_H */ diff --git 
a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h new file mode 100644 index 00000000000..1f9a0f5272f --- /dev/null +++ b/include/net/nfc/nci_core.h @@ -0,0 +1,232 @@ +/* + * The NFC Controller Interface is the communication protocol between an + * NFC Controller (NFCC) and a Device Host (DH). + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2013 Intel Corporation. All rights reserved. + * + * Written by Ilan Elias <ilane@ti.com> + * + * Acknowledgements: + * This file is based on hci_core.h, which was written + * by Maxim Krasnyansky. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef __NCI_CORE_H +#define __NCI_CORE_H + +#include <linux/interrupt.h> +#include <linux/skbuff.h> + +#include <net/nfc/nfc.h> +#include <net/nfc/nci.h> + +/* NCI device flags */ +enum nci_flag { + NCI_INIT, + NCI_UP, + NCI_DATA_EXCHANGE, + NCI_DATA_EXCHANGE_TO, +}; + +/* NCI device states */ +enum nci_state { + NCI_IDLE, + NCI_DISCOVERY, + NCI_W4_ALL_DISCOVERIES, + NCI_W4_HOST_SELECT, + NCI_POLL_ACTIVE, +}; + +/* NCI timeouts */ +#define NCI_RESET_TIMEOUT 5000 +#define NCI_INIT_TIMEOUT 5000 +#define NCI_SET_CONFIG_TIMEOUT 5000 +#define NCI_RF_DISC_TIMEOUT 5000 +#define NCI_RF_DISC_SELECT_TIMEOUT 5000 +#define NCI_RF_DEACTIVATE_TIMEOUT 30000 +#define NCI_CMD_TIMEOUT 5000 +#define NCI_DATA_TIMEOUT 700 + +struct nci_dev; + +struct nci_ops { + int (*open)(struct nci_dev *ndev); + int (*close)(struct nci_dev *ndev); + int (*send)(struct nci_dev *ndev, struct sk_buff *skb); + int (*setup)(struct nci_dev *ndev); +}; + +#define NCI_MAX_SUPPORTED_RF_INTERFACES 4 +#define NCI_MAX_DISCOVERED_TARGETS 10 + +/* NCI Core structures */ +struct nci_dev { + struct nfc_dev *nfc_dev; + struct nci_ops *ops; + + int tx_headroom; + int tx_tailroom; + + atomic_t state; + unsigned long flags; + + atomic_t cmd_cnt; + atomic_t credits_cnt; + + struct timer_list cmd_timer; + struct timer_list data_timer; + + struct workqueue_struct *cmd_wq; + struct work_struct cmd_work; + + struct workqueue_struct *rx_wq; + struct work_struct rx_work; + + struct workqueue_struct *tx_wq; + struct work_struct tx_work; + + struct sk_buff_head cmd_q; + struct sk_buff_head rx_q; + struct sk_buff_head tx_q; + + struct mutex req_lock; + struct completion req_completion; + __u32 req_status; + __u32 req_result; + + void *driver_data; + + __u32 poll_prots; + __u32 target_active_prot; + + struct nfc_target targets[NCI_MAX_DISCOVERED_TARGETS]; + int n_targets; + + /* received during NCI_OP_CORE_RESET_RSP */ + __u8 nci_ver; + + /* received during NCI_OP_CORE_INIT_RSP */ + __u32 nfcc_features; + __u8 num_supported_rf_interfaces; + __u8 supported_rf_interfaces + [NCI_MAX_SUPPORTED_RF_INTERFACES]; + __u8 max_logical_connections; + __u16 max_routing_table_size; + __u8 max_ctrl_pkt_payload_len; + __u16 max_size_for_large_params; + __u8 manufact_id; + __u32 manufact_specific_info; + + /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */ + __u8 max_data_pkt_payload_size; + __u8 initial_num_credits; 
+ + /* stored during nci_data_exchange */ + data_exchange_cb_t data_exchange_cb; + void *data_exchange_cb_context; + struct sk_buff *rx_data_reassembly; + + /* stored during intf_activated_ntf */ + __u8 remote_gb[NFC_MAX_GT_LEN]; + __u8 remote_gb_len; +}; + +/* ----- NCI Devices ----- */ +struct nci_dev *nci_allocate_device(struct nci_ops *ops, + __u32 supported_protocols, + int tx_headroom, + int tx_tailroom); +void nci_free_device(struct nci_dev *ndev); +int nci_register_device(struct nci_dev *ndev); +void nci_unregister_device(struct nci_dev *ndev); +int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); +int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); + +static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, + unsigned int len, + gfp_t how) +{ + struct sk_buff *skb; + + skb = alloc_skb(len + ndev->tx_headroom + ndev->tx_tailroom, how); + if (skb) + skb_reserve(skb, ndev->tx_headroom); + + return skb; +} + +static inline void nci_set_parent_dev(struct nci_dev *ndev, struct device *dev) +{ + nfc_set_parent_dev(ndev->nfc_dev, dev); +} + +static inline void nci_set_drvdata(struct nci_dev *ndev, void *data) +{ + ndev->driver_data = data; +} + +static inline void *nci_get_drvdata(struct nci_dev *ndev) +{ + return ndev->driver_data; +} + +void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb); +void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb); +void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb); +int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload); +int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb); +void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, + int err); +void nci_clear_target_list(struct nci_dev *ndev); + +/* ----- NCI requests ----- */ +#define NCI_REQ_DONE 0 +#define NCI_REQ_PEND 1 +#define NCI_REQ_CANCELED 2 + +void nci_req_complete(struct nci_dev *ndev, int result); + +/* ----- NCI status code ----- */ +int nci_to_errno(__u8 code); + +/* ----- NCI over SPI acknowledge modes ----- */ +#define NCI_SPI_CRC_DISABLED 0x00 +#define NCI_SPI_CRC_ENABLED 0x01 + +/* ----- NCI SPI structures ----- */ +struct nci_spi { + struct nci_dev *ndev; + struct spi_device *spi; + + unsigned int xfer_udelay; /* microseconds delay between + transactions */ + u8 acknowledge_mode; + + struct completion req_completion; + u8 req_result; +}; + +/* ----- NCI SPI ----- */ +struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi, + u8 acknowledge_mode, unsigned int delay, + struct nci_dev *ndev); +int nci_spi_send(struct nci_spi *nspi, + struct completion *write_handshake_completion, + struct sk_buff *skb); +struct sk_buff *nci_spi_read(struct nci_spi *nspi); + +#endif /* __NCI_CORE_H */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h new file mode 100644 index 00000000000..6c583e244de --- /dev/null +++ b/include/net/nfc/nfc.h @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * + * Authors: + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __NET_NFC_H +#define __NET_NFC_H + +#include <linux/nfc.h> +#include <linux/device.h> +#include <linux/skbuff.h> + +#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__) +#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__) + +struct nfc_phy_ops { + int (*write)(void *dev_id, struct sk_buff *skb); + int (*enable)(void *dev_id); + void (*disable)(void *dev_id); +}; + +struct nfc_dev; + +/** + * data_exchange_cb_t - Definition of nfc_data_exchange callback + * + * @context: nfc_data_exchange cb_context parameter + * @skb: response data + * @err: If an error has occurred during data exchange, it is the + * error number. Zero means no error. + * + * When an rx or tx packet is lost or corrupted or the target gets out + * of the operating field, err is -EIO. + */ +typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb, + int err); + +typedef void (*se_io_cb_t)(void *context, u8 *apdu, size_t apdu_len, int err); + +struct nfc_target; + +struct nfc_ops { + int (*dev_up)(struct nfc_dev *dev); + int (*dev_down)(struct nfc_dev *dev); + int (*start_poll)(struct nfc_dev *dev, + u32 im_protocols, u32 tm_protocols); + void (*stop_poll)(struct nfc_dev *dev); + int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target, + u8 comm_mode, u8 *gb, size_t gb_len); + int (*dep_link_down)(struct nfc_dev *dev); + int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target, + u32 protocol); + void (*deactivate_target)(struct nfc_dev *dev, + struct nfc_target *target); + int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target, + struct sk_buff *skb, data_exchange_cb_t cb, + void *cb_context); + int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb); + int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); + int (*fw_download)(struct nfc_dev *dev, const char *firmware_name); + + /* Secure Element API */ + int (*discover_se)(struct nfc_dev *dev); + int (*enable_se)(struct nfc_dev *dev, u32 se_idx); + int (*disable_se)(struct nfc_dev *dev, u32 se_idx); + int (*se_io) (struct nfc_dev *dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); +}; + +#define NFC_TARGET_IDX_ANY -1 +#define NFC_MAX_GT_LEN 48 +#define NFC_ATR_RES_GT_OFFSET 15 + +/** + * struct nfc_target - NFC target description + * + * @sens_res: 2 bytes describing the target SENS_RES response, if the target + * is a type A one. The %sens_res most significant byte must be byte 2 + * as described by the NFC Forum digital specification (i.e. the platform + * configuration one) while the %sens_res least significant byte is byte 1.
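+ *
+ * As a purely illustrative note (not part of this header), a driver that
+ * read SENS_RES byte 1 and byte 2 off the air would therefore be expected
+ * to fill the field as:
+ *
+ *	target->sens_res = (sens_res_byte2 << 8) | sens_res_byte1;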
+ */ +struct nfc_target { + u32 idx; + u32 supported_protocols; + u16 sens_res; + u8 sel_res; + u8 nfcid1_len; + u8 nfcid1[NFC_NFCID1_MAXSIZE]; + u8 nfcid2_len; + u8 nfcid2[NFC_NFCID2_MAXSIZE]; + u8 sensb_res_len; + u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; + u8 sensf_res_len; + u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; + u8 hci_reader_gate; + u8 logical_idx; + u8 is_iso15693; + u8 iso15693_dsfid; + u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE]; +}; + +/** + * nfc_se - A structure for NFC accessible secure elements. + * + * @idx: The secure element index. User space will enable or + * disable a secure element by its index. + * @type: The secure element type. It can be SE_UICC or + * SE_EMBEDDED. + * @state: The secure element state, either enabled or disabled. + * + */ +struct nfc_se { + struct list_head list; + u32 idx; + u16 type; + u16 state; +}; + +struct nfc_genl_data { + u32 poll_req_portid; + struct mutex genl_data_mutex; +}; + +struct nfc_dev { + int idx; + u32 target_next_idx; + struct nfc_target *targets; + int n_targets; + int targets_generation; + struct device dev; + bool dev_up; + bool fw_download_in_progress; + u8 rf_mode; + bool polling; + struct nfc_target *active_target; + bool dep_link_up; + struct nfc_genl_data genl_data; + u32 supported_protocols; + + struct list_head secure_elements; + + int tx_headroom; + int tx_tailroom; + + struct timer_list check_pres_timer; + struct work_struct check_pres_work; + + bool shutting_down; + + struct rfkill *rfkill; + + struct nfc_ops *ops; +}; +#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) + +extern struct class nfc_class; + +struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, + u32 supported_protocols, + int tx_headroom, + int tx_tailroom); + +/** + * nfc_free_device - free nfc device + * + * @dev: The nfc device to free + */ +static inline void nfc_free_device(struct nfc_dev *dev) +{ + put_device(&dev->dev); +} + +int nfc_register_device(struct nfc_dev *dev); + +void nfc_unregister_device(struct nfc_dev *dev); + +/** + * nfc_set_parent_dev - set the parent device + * + * @nfc_dev: The nfc device whose parent is being set + * @dev: The parent device + */ +static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev, + struct device *dev) +{ + nfc_dev->dev.parent = dev; +} + +/** + * nfc_set_drvdata - set driver specific data + * + * @dev: The nfc device + * @data: Pointer to driver specific data + */ +static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +/** + * nfc_get_drvdata - get driver specific data + * + * @dev: The nfc device + */ +static inline void *nfc_get_drvdata(struct nfc_dev *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +/** + * nfc_device_name - get the nfc device name + * + * @dev: The nfc device whose name to return + */ +static inline const char *nfc_device_name(struct nfc_dev *dev) +{ + return dev_name(&dev->dev); +} + +struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, + unsigned int flags, unsigned int size, + unsigned int *err); +struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp); + +int nfc_set_remote_general_bytes(struct nfc_dev *dev, + u8 *gt, u8 gt_len); +u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len); + +int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + u32 result); + +int nfc_targets_found(struct nfc_dev *dev, + struct nfc_target *targets, int ntargets); +int nfc_target_lost(struct nfc_dev *dev, u32 target_idx); + +int nfc_dep_link_is_up(struct nfc_dev
*dev, u32 target_idx, + u8 comm_mode, u8 rf_mode); + +int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode, + u8 *gb, size_t gb_len); +int nfc_tm_deactivated(struct nfc_dev *dev); +int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb); + +void nfc_driver_failure(struct nfc_dev *dev, int err); + +int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type); +int nfc_remove_se(struct nfc_dev *dev, u32 se_idx); +struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx); + +void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, + u8 payload_type, u8 direction); + +#endif /* __NET_NFC_H */ diff --git a/include/net/nl802154.h b/include/net/nl802154.h new file mode 100644 index 00000000000..b23548e0409 --- /dev/null +++ b/include/net/nl802154.h @@ -0,0 +1,126 @@ +/* + * nl802154.h + * + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#ifndef IEEE802154_NL_H +#define IEEE802154_NL_H + +struct net_device; +struct ieee802154_addr; + +/** + * ieee802154_nl_assoc_indic - Notify userland of an association request. + * @dev: The network device on which this association request was + * received. + * @addr: The address of the device requesting association. + * @cap: The capability information field from the device. + * + * This informs a userland coordinator of a device requesting to + * associate with the PAN controlled by the coordinator. + * + * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document. + */ +int ieee802154_nl_assoc_indic(struct net_device *dev, + struct ieee802154_addr *addr, u8 cap); + +/** + * ieee802154_nl_assoc_confirm - Notify userland of association. + * @dev: The device which has completed association. + * @short_addr: The short address assigned to the device. + * @status: The status of the association. + * + * Inform userland of the result of an association request. If the + * association request included asking the coordinator to allocate + * a short address then it is returned in @short_addr. + * + * Note: This is in section 7.3.2 of the IEEE 802.15.4 document. + */ +int ieee802154_nl_assoc_confirm(struct net_device *dev, + __le16 short_addr, u8 status); + +/** + * ieee802154_nl_disassoc_indic - Notify userland of disassociation. + * @dev: The device on which disassociation was indicated. + * @addr: The device which is disassociating. + * @reason: The reason for the disassociation. + * + * Inform userland that a device has disassociated from the network. + * + * Note: This is in section 7.3.3 of the IEEE 802.15.4 document. + */ +int ieee802154_nl_disassoc_indic(struct net_device *dev, + struct ieee802154_addr *addr, u8 reason); + +/** + * ieee802154_nl_disassoc_confirm - Notify userland of disassociation + * completion. + * @dev: The device on which disassociation was ordered. + * @status: The result of the disassociation. 
+ * + * Inform userland of the result of requesting that a device + * disassociate, or the result of requesting that we disassociate from + * a PAN managed by another coordinator. + * + * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document. + */ +int ieee802154_nl_disassoc_confirm(struct net_device *dev, + u8 status); + +/** + * ieee802154_nl_scan_confirm - Notify userland of completion of scan. + * @dev: The device which was instructed to scan. + * @status: The status of the scan operation. + * @scan_type: What type of scan was performed. + * @unscanned: Any channels that the device was unable to scan. + * @edl: The energy levels (if a passive scan). + * + * + * Note: This is in section 7.1.11 of the IEEE 802.15.4 document. + * Note: This API does not permit the return of an active scan result. + */ +int ieee802154_nl_scan_confirm(struct net_device *dev, + u8 status, u8 scan_type, u32 unscanned, u8 page, + u8 *edl/*, struct list_head *pan_desc_list */); + +/** + * ieee802154_nl_beacon_indic - Notify userland of a received beacon. + * @dev: The device on which a beacon was received. + * @panid: The PAN of the coordinator. + * @coord_addr: The short address of the coordinator on that PAN. + * + * Note: This is in section 7.1.5 of the IEEE 802.15.4 document. + * Note: This API does not provide extended information such as what + * channel the PAN is on or what the LQI of the beacon frame was on + * receipt. + * Note: This API cannot indicate a beacon frame for a coordinator + * operating in long addressing mode. + */ +int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, + __le16 coord_addr); + +/** + * ieee802154_nl_start_confirm - Notify userland of completion of start. + * @dev: The device which was instructed to scan. + * @status: The status of the scan operation. + * + * Note: This is in section 7.1.14 of the IEEE 802.15.4 document. + */ +int ieee802154_nl_start_confirm(struct net_device *dev, u8 status); + +#endif diff --git a/include/net/p8022.h b/include/net/p8022.h index 42e9fac51b3..05e41383856 100644 --- a/include/net/p8022.h +++ b/include/net/p8022.h @@ -1,13 +1,13 @@ #ifndef _NET_P8022_H #define _NET_P8022_H -extern struct datalink_proto * - register_8022_client(unsigned char type, - int (*func)(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev)); -extern void unregister_8022_client(struct datalink_proto *proto); +struct datalink_proto * +register_8022_client(unsigned char type, + int (*func)(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev)); +void unregister_8022_client(struct datalink_proto *proto); -extern struct datalink_proto *make_8023_client(void); -extern void destroy_8023_client(struct datalink_proto *dl); +struct datalink_proto *make_8023_client(void); +void destroy_8023_client(struct datalink_proto *dl); #endif diff --git a/include/net/phonet/gprs.h b/include/net/phonet/gprs.h new file mode 100644 index 00000000000..bcd525e39a0 --- /dev/null +++ b/include/net/phonet/gprs.h @@ -0,0 +1,38 @@ +/* + * File: pep_gprs.h + * + * GPRS over Phonet pipe end point socket + * + * Copyright (C) 2008 Nokia Corporation. + * + * Author: Rémi Denis-Courmont + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef NET_PHONET_GPRS_H +#define NET_PHONET_GPRS_H + +struct sock; +struct sk_buff; + +int pep_writeable(struct sock *sk); +int pep_write(struct sock *sk, struct sk_buff *skb); +struct sk_buff *pep_read(struct sock *sk); + +int gprs_attach(struct sock *sk); +void gprs_detach(struct sock *sk); + +#endif diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h new file mode 100644 index 00000000000..b669fe6dbc3 --- /dev/null +++ b/include/net/phonet/pep.h @@ -0,0 +1,168 @@ +/* + * File: pep.h + * + * Phonet Pipe End Point sockets definitions + * + * Copyright (C) 2008 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef NET_PHONET_PEP_H +#define NET_PHONET_PEP_H + +struct pep_sock { + struct pn_sock pn_sk; + + /* XXX: union-ify listening vs connected stuff ? 
*/ + /* Listening socket stuff: */ + struct hlist_head hlist; + + /* Connected socket stuff: */ + struct sock *listener; + struct sk_buff_head ctrlreq_queue; +#define PNPIPE_CTRLREQ_MAX 10 + atomic_t tx_credits; + int ifindex; + u16 peer_type; /* peer type/subtype */ + u8 pipe_handle; + + u8 rx_credits; + u8 rx_fc; /* RX flow control */ + u8 tx_fc; /* TX flow control */ + u8 init_enable; /* auto-enable at creation */ + u8 aligned; +}; + +static inline struct pep_sock *pep_sk(struct sock *sk) +{ + return (struct pep_sock *)sk; +} + +extern const struct proto_ops phonet_stream_ops; + +/* Pipe protocol definitions */ +struct pnpipehdr { + u8 utid; /* transaction ID */ + u8 message_id; + u8 pipe_handle; + union { + u8 state_after_connect; /* connect request */ + u8 state_after_reset; /* reset request */ + u8 error_code; /* any response */ + u8 pep_type; /* status indication */ + u8 data[1]; + }; +}; +#define other_pep_type data[1] + +static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) +{ + return (struct pnpipehdr *)skb_transport_header(skb); +} + +#define MAX_PNPIPE_HEADER (MAX_PHONET_HEADER + 4) + +enum { + PNS_PIPE_CREATE_REQ = 0x00, + PNS_PIPE_CREATE_RESP, + PNS_PIPE_REMOVE_REQ, + PNS_PIPE_REMOVE_RESP, + + PNS_PIPE_DATA = 0x20, + PNS_PIPE_ALIGNED_DATA, + + PNS_PEP_CONNECT_REQ = 0x40, + PNS_PEP_CONNECT_RESP, + PNS_PEP_DISCONNECT_REQ, + PNS_PEP_DISCONNECT_RESP, + PNS_PEP_RESET_REQ, + PNS_PEP_RESET_RESP, + PNS_PEP_ENABLE_REQ, + PNS_PEP_ENABLE_RESP, + PNS_PEP_CTRL_REQ, + PNS_PEP_CTRL_RESP, + PNS_PEP_DISABLE_REQ = 0x4C, + PNS_PEP_DISABLE_RESP, + + PNS_PEP_STATUS_IND = 0x60, + PNS_PIPE_CREATED_IND, + PNS_PIPE_RESET_IND = 0x63, + PNS_PIPE_ENABLED_IND, + PNS_PIPE_REDIRECTED_IND, + PNS_PIPE_DISABLED_IND = 0x66, +}; + +#define PN_PIPE_INVALID_HANDLE 0xff +#define PN_PEP_TYPE_COMMON 0x00 + +/* Phonet pipe status indication */ +enum { + PN_PEP_IND_FLOW_CONTROL, + PN_PEP_IND_ID_MCFC_GRANT_CREDITS, +}; + +/* Phonet pipe error codes */ +enum { + PN_PIPE_NO_ERROR, + PN_PIPE_ERR_INVALID_PARAM, + PN_PIPE_ERR_INVALID_HANDLE, + PN_PIPE_ERR_INVALID_CTRL_ID, + PN_PIPE_ERR_NOT_ALLOWED, + PN_PIPE_ERR_PEP_IN_USE, + PN_PIPE_ERR_OVERLOAD, + PN_PIPE_ERR_DEV_DISCONNECTED, + PN_PIPE_ERR_TIMEOUT, + PN_PIPE_ERR_ALL_PIPES_IN_USE, + PN_PIPE_ERR_GENERAL, + PN_PIPE_ERR_NOT_SUPPORTED, +}; + +/* Phonet pipe states */ +enum { + PN_PIPE_DISABLE, + PN_PIPE_ENABLE, +}; + +/* Phonet pipe sub-block types */ +enum { + PN_PIPE_SB_CREATE_REQ_PEP_SUB_TYPE, + PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE, + PN_PIPE_SB_REDIRECT_REQ_PEP_SUB_TYPE, + PN_PIPE_SB_NEGOTIATED_FC, + PN_PIPE_SB_REQUIRED_FC_TX, + PN_PIPE_SB_PREFERRED_FC_RX, + PN_PIPE_SB_ALIGNED_DATA, +}; + +/* Phonet pipe flow control models */ +enum { + PN_NO_FLOW_CONTROL, + PN_LEGACY_FLOW_CONTROL, + PN_ONE_CREDIT_FLOW_CONTROL, + PN_MULTI_CREDIT_FLOW_CONTROL, + PN_MAX_FLOW_CONTROL, +}; + +#define pn_flow_safe(fc) ((fc) >> 1) + +/* Phonet pipe flow control states */ +enum { + PEP_IND_EMPTY, + PEP_IND_BUSY, + PEP_IND_READY, +}; + +#endif diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h new file mode 100644 index 00000000000..68e509750ca --- /dev/null +++ b/include/net/phonet/phonet.h @@ -0,0 +1,119 @@ +/* + * File: af_phonet.h + * + * Phonet sockets kernel definitions + * + * Copyright (C) 2008 Nokia Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef AF_PHONET_H +#define AF_PHONET_H + +/* + * The lower layers may not require more space, ever. Make sure it's + * enough. + */ +#define MAX_PHONET_HEADER (8 + MAX_HEADER) + +/* + * Every Phonet* socket has this structure first in its + * protocol-specific structure under name c. + */ +struct pn_sock { + struct sock sk; + u16 sobject; + u16 dobject; + u8 resource; +}; + +static inline struct pn_sock *pn_sk(struct sock *sk) +{ + return (struct pn_sock *)sk; +} + +extern const struct proto_ops phonet_dgram_ops; + +void pn_sock_init(void); +struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa); +void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb); +void phonet_get_local_port_range(int *min, int *max); +void pn_sock_hash(struct sock *sk); +void pn_sock_unhash(struct sock *sk); +int pn_sock_get_port(struct sock *sk, unsigned short sport); + +struct sock *pn_find_sock_by_res(struct net *net, u8 res); +int pn_sock_bind_res(struct sock *sock, u8 res); +int pn_sock_unbind_res(struct sock *sk, u8 res); +void pn_sock_unbind_all_res(struct sock *sk); + +int pn_skb_send(struct sock *sk, struct sk_buff *skb, + const struct sockaddr_pn *target); + +static inline struct phonethdr *pn_hdr(struct sk_buff *skb) +{ + return (struct phonethdr *)skb_network_header(skb); +} + +static inline struct phonetmsg *pn_msg(struct sk_buff *skb) +{ + return (struct phonetmsg *)skb_transport_header(skb); +} + +/* + * Get the other party's sockaddr from received skb. The skb begins + * with a Phonet header. + */ +static inline +void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa) +{ + struct phonethdr *ph = pn_hdr(skb); + u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj); + + sa->spn_family = AF_PHONET; + pn_sockaddr_set_object(sa, obj); + pn_sockaddr_set_resource(sa, ph->pn_res); + memset(sa->spn_zero, 0, sizeof(sa->spn_zero)); +} + +static inline +void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa) +{ + struct phonethdr *ph = pn_hdr(skb); + u16 obj = pn_object(ph->pn_rdev, ph->pn_robj); + + sa->spn_family = AF_PHONET; + pn_sockaddr_set_object(sa, obj); + pn_sockaddr_set_resource(sa, ph->pn_res); + memset(sa->spn_zero, 0, sizeof(sa->spn_zero)); +} + +/* Protocols in Phonet protocol family. */ +struct phonet_protocol { + const struct proto_ops *ops; + struct proto *prot; + int sock_type; +}; + +int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp); +void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp); + +int phonet_sysctl_init(void); +void phonet_sysctl_exit(void); +int isi_register(void); +void isi_unregister(void); + +#endif diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h new file mode 100644 index 00000000000..8639de5750f --- /dev/null +++ b/include/net/phonet/pn_dev.h @@ -0,0 +1,62 @@ +/* + * File: pn_dev.h + * + * Phonet network device + * + * Copyright (C) 2008 Nokia Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef PN_DEV_H +#define PN_DEV_H + +struct phonet_device_list { + struct list_head list; + struct mutex lock; +}; + +struct phonet_device_list *phonet_device_list(struct net *net); + +struct phonet_device { + struct list_head list; + struct net_device *netdev; + DECLARE_BITMAP(addrs, 64); + struct rcu_head rcu; +}; + +int phonet_device_init(void); +void phonet_device_exit(void); +int phonet_netlink_register(void); +struct net_device *phonet_device_get(struct net *net); + +int phonet_address_add(struct net_device *dev, u8 addr); +int phonet_address_del(struct net_device *dev, u8 addr); +u8 phonet_address_get(struct net_device *dev, u8 addr); +int phonet_address_lookup(struct net *net, u8 addr); +void phonet_address_notify(int event, struct net_device *dev, u8 addr); + +int phonet_route_add(struct net_device *dev, u8 daddr); +int phonet_route_del(struct net_device *dev, u8 daddr); +void rtm_phonet_notify(int event, struct net_device *dev, u8 dst); +struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr); +struct net_device *phonet_route_output(struct net *net, u8 daddr); + +#define PN_NO_ADDR 0xff + +extern const struct file_operations pn_sock_seq_fops; +extern const struct file_operations pn_res_seq_fops; + +#endif diff --git a/include/net/ping.h b/include/net/ping.h new file mode 100644 index 00000000000..026479b61a2 --- /dev/null +++ b/include/net/ping.h @@ -0,0 +1,111 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the "ping" module. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _PING_H +#define _PING_H + +#include <net/icmp.h> +#include <net/netns/hash.h> + +/* PING_HTABLE_SIZE must be power of 2 */ +#define PING_HTABLE_SIZE 64 +#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1) + +#define ping_portaddr_for_each_entry(__sk, node, list) \ + hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) + +/* + * gid_t is either uint or ushort. 
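The per-namespace routing helpers in pn_dev.h above are what a sender would use to pick an egress interface for a Phonet destination. A hedged sketch; the fallback policy and the function name are illustrative, not logic taken from this patch:

#include <net/phonet/pn_dev.h>

static struct net_device *example_pick_dev(struct net *net, u8 daddr)
{
        struct net_device *dev;

        /* prefer an explicit route for daddr, else any Phonet-capable device */
        dev = phonet_route_output(net, daddr);
        if (!dev)
                dev = phonet_device_get(net);
        return dev;
}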
We want to pass it to + * proc_dointvec_minmax(), so it must not be larger than MAX_INT + */ +#define GID_T_MAX (((gid_t)~0U) >> 1) + +/* Compatibility glue so we can support IPv6 when it's compiled as a module */ +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); + void (*ip6_datagram_recv_common_ctl)(struct sock *sk, + struct msghdr *msg, + struct sk_buff *skb); + void (*ip6_datagram_recv_specific_ctl)(struct sock *sk, + struct msghdr *msg, + struct sk_buff *skb); + int (*icmpv6_err_convert)(u8 type, u8 code, int *err); + void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err, + __be16 port, u32 info, u8 *payload); + int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, int strict); +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +extern struct proto ping_prot; +#if IS_ENABLED(CONFIG_IPV6) +extern struct pingv6_ops pingv6_ops; +#endif + +struct pingfakehdr { + struct icmphdr icmph; + struct iovec *iov; + sa_family_t family; + __wsum wcheck; +}; + +int ping_get_port(struct sock *sk, unsigned short ident); +void ping_hash(struct sock *sk); +void ping_unhash(struct sock *sk); + +int ping_init_sock(struct sock *sk); +void ping_close(struct sock *sk, long timeout); +int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len); +void ping_err(struct sk_buff *skb, int offset, u32 info); +int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, + struct sk_buff *); + +int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len, int noblock, int flags, int *addr_len); +int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, + void *user_icmph, size_t icmph_len); +int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len); +int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +void ping_rcv(struct sk_buff *skb); + +#ifdef CONFIG_PROC_FS +struct ping_seq_afinfo { + char *name; + sa_family_t family; + const struct file_operations *seq_fops; + const struct seq_operations seq_ops; +}; + +extern const struct file_operations ping_seq_fops; + +void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family); +void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos); +void ping_seq_stop(struct seq_file *seq, void *v); +int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo); +void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo); + +int __init ping_proc_init(void); +void ping_proc_exit(void); +#endif + +void __init ping_init(void); +int __init pingv6_init(void); +void pingv6_exit(void); + +#endif /* _PING_H */ diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index d349c66ef82..6da46dcf104 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -7,16 +7,15 @@ /* Basic packet classifier frontend definitions. 
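The power-of-two requirement on PING_HTABLE_SIZE above exists so that a socket can be dropped into a bucket with a mask rather than a modulo. A toy, user-space illustration of that arithmetic; the bucket choice is deliberately simplified and is not the kernel's actual hash function:

#include <stdio.h>

#define PING_HTABLE_SIZE 64
#define PING_HTABLE_MASK (PING_HTABLE_SIZE - 1)

int main(void)
{
        unsigned short ident = 0x1234;  /* ICMP echo identifier */

        /* ident & MASK equals ident % SIZE only because SIZE is a power of 2 */
        printf("bucket %u\n", ident & PING_HTABLE_MASK);
        return 0;
}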
*/ -struct tcf_walker -{ +struct tcf_walker { int stop; int skip; int count; int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *); }; -extern int register_tcf_proto_ops(struct tcf_proto_ops *ops); -extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); +int register_tcf_proto_ops(struct tcf_proto_ops *ops); +int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) @@ -61,22 +60,28 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r) tp->q->ops->cl_ops->unbind_tcf(tp->q, cl); } -struct tcf_exts -{ +struct tcf_exts { #ifdef CONFIG_NET_CLS_ACT - struct tc_action *action; + __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ + struct list_head actions; #endif -}; - -/* Map to export classifier specific extension TLV types to the - * generic extensions API. Unsupported extensions must be set to 0. - */ -struct tcf_ext_map -{ + /* Map to export classifier specific extension TLV types to the + * generic extensions API. Unsupported extensions must be set to 0. + */ int action; int police; }; +static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police) +{ +#ifdef CONFIG_NET_CLS_ACT + exts->type = 0; + INIT_LIST_HEAD(&exts->actions); +#endif + exts->action = action; + exts->police = police; +} + /** * tcf_exts_is_predicative - check if a predicative extension is present * @exts: tc filter extensions handle @@ -88,7 +93,7 @@ static inline int tcf_exts_is_predicative(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT - return !!exts->action; + return !list_empty(&exts->actions); #else return 0; #endif @@ -123,28 +128,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, struct tcf_result *res) { #ifdef CONFIG_NET_CLS_ACT - if (exts->action) - return tcf_action_exec(skb, exts->action, res); + if (!list_empty(&exts->actions)) + return tcf_action_exec(skb, &exts->actions, res); #endif return 0; } -extern int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb, - struct nlattr *rate_tlv, struct tcf_exts *exts, - const struct tcf_ext_map *map); -extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); -extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, - struct tcf_exts *src); -extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts, - const struct tcf_ext_map *map); -extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, - const struct tcf_ext_map *map); +int tcf_exts_validate(struct net *net, struct tcf_proto *tp, + struct nlattr **tb, struct nlattr *rate_tlv, + struct tcf_exts *exts, bool ovr); +void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); +void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, + struct tcf_exts *src); +int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); +int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts); /** * struct tcf_pkt_info - packet information */ -struct tcf_pkt_info -{ +struct tcf_pkt_info { unsigned char * ptr; int nexthdr; }; @@ -162,8 +164,7 @@ struct tcf_ematch_ops; * @datalen: length of the ematch specific configuration data * @data: ematch specific data */ -struct tcf_ematch -{ +struct tcf_ematch { struct tcf_ematch_ops * ops; unsigned long data; unsigned int datalen; @@ -211,8 +212,7 @@ static inline int tcf_em_early_end(struct tcf_ematch *em, int result) * @hdr: ematch tree header supplied by userspace * @matches: array of ematches */ -struct tcf_ematch_tree -{ +struct 
tcf_ematch_tree { struct tcf_ematch_tree_hdr hdr; struct tcf_ematch * matches; @@ -230,8 +230,7 @@ struct tcf_ematch_tree * @owner: owner, must be set to THIS_MODULE * @link: link to previous/next ematch module (internal use) */ -struct tcf_ematch_ops -{ +struct tcf_ematch_ops { int kind; int datalen; int (*change)(struct tcf_proto *, void *, @@ -245,14 +244,14 @@ struct tcf_ematch_ops struct list_head link; }; -extern int tcf_em_register(struct tcf_ematch_ops *); -extern int tcf_em_unregister(struct tcf_ematch_ops *); -extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, - struct tcf_ematch_tree *); -extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); -extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); -extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, - struct tcf_pkt_info *); +int tcf_em_register(struct tcf_ematch_ops *); +void tcf_em_unregister(struct tcf_ematch_ops *); +int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, + struct tcf_ematch_tree *); +void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); +int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); +int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, + struct tcf_pkt_info *); /** * tcf_em_tree_change - replace ematch tree of a running classifier @@ -302,8 +301,7 @@ static inline int tcf_em_tree_match(struct sk_buff *skb, #else /* CONFIG_NET_EMATCH */ -struct tcf_ematch_tree -{ +struct tcf_ematch_tree { }; #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0) @@ -331,34 +329,36 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer) static inline int tcf_valid_offset(const struct sk_buff *skb, const unsigned char *ptr, const int len) { - return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head); + return likely((ptr + len) <= skb_tail_pointer(skb) && + ptr >= skb->head && + (ptr <= (ptr + len))); } #ifdef CONFIG_NET_CLS_IND #include <net/net_namespace.h> static inline int -tcf_change_indev(struct tcf_proto *tp, char *indev, struct nlattr *indev_tlv) +tcf_change_indev(struct net *net, struct nlattr *indev_tlv) { + char indev[IFNAMSIZ]; + struct net_device *dev; + if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) return -EINVAL; - return 0; + dev = __dev_get_by_name(net, indev); + if (!dev) + return -ENODEV; + return dev->ifindex; } -static inline int -tcf_match_indev(struct sk_buff *skb, char *indev) +static inline bool +tcf_match_indev(struct sk_buff *skb, int ifindex) { - struct net_device *dev; - - if (indev[0]) { - if (!skb->iif) - return 0; - dev = __dev_get_by_index(&init_net, skb->iif); - if (!dev || strcmp(indev, dev->name)) - return 0; - } - - return 1; + if (!ifindex) + return true; + if (!skb->skb_iif) + return false; + return ifindex == skb->skb_iif; } #endif /* CONFIG_NET_CLS_IND */ diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 46fb4d80c74..ec030cd7661 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -5,15 +5,14 @@ #include <linux/ktime.h> #include <net/sch_generic.h> -struct qdisc_walker -{ +struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *); }; -#define QDISC_ALIGNTO 32 +#define QDISC_ALIGNTO 64 #define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1)) static inline void *qdisc_priv(struct Qdisc *q) @@ -33,7 +32,7 @@ static inline void *qdisc_priv(struct Qdisc *q) The result: [34]86 is 
not good choice for QoS router :-( - The things are not so bad, because we may use artifical + The things are not so bad, because we may use artificial clock evaluated by integration of network data flow in the most critical places. */ @@ -41,16 +40,17 @@ static inline void *qdisc_priv(struct Qdisc *q) typedef u64 psched_time_t; typedef long psched_tdiff_t; -/* Avoid doing 64 bit divide by 1000 */ -#define PSCHED_US2NS(x) ((s64)(x) << 10) -#define PSCHED_NS2US(x) ((x) >> 10) +/* Avoid doing 64 bit divide */ +#define PSCHED_SHIFT 6 +#define PSCHED_TICKS2NS(x) ((s64)(x) << PSCHED_SHIFT) +#define PSCHED_NS2TICKS(x) ((x) >> PSCHED_SHIFT) -#define PSCHED_TICKS_PER_SEC PSCHED_NS2US(NSEC_PER_SEC) +#define PSCHED_TICKS_PER_SEC PSCHED_NS2TICKS(NSEC_PER_SEC) #define PSCHED_PASTPERFECT 0 static inline psched_time_t psched_get_time(void) { - return PSCHED_NS2US(ktime_to_ns(ktime_get())); + return PSCHED_NS2TICKS(ktime_to_ns(ktime_get())); } static inline psched_tdiff_t @@ -64,40 +64,60 @@ struct qdisc_watchdog { struct Qdisc *qdisc; }; -extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); -extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, - psched_time_t expires); -extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); - -extern struct Qdisc_ops pfifo_qdisc_ops; -extern struct Qdisc_ops bfifo_qdisc_ops; +void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); +void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); -extern int register_qdisc(struct Qdisc_ops *qops); -extern int unregister_qdisc(struct Qdisc_ops *qops); -extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); -extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); -extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, - struct nlattr *tab); -extern void qdisc_put_rtab(struct qdisc_rate_table *tab); +static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, + psched_time_t expires) +{ + qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires)); +} -extern void __qdisc_run(struct net_device *dev); +void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); -static inline void qdisc_run(struct net_device *dev) +extern struct Qdisc_ops pfifo_qdisc_ops; +extern struct Qdisc_ops bfifo_qdisc_ops; +extern struct Qdisc_ops pfifo_head_drop_qdisc_ops; + +int fifo_set_limit(struct Qdisc *q, unsigned int limit); +struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, + unsigned int limit); + +int register_qdisc(struct Qdisc_ops *qops); +int unregister_qdisc(struct Qdisc_ops *qops); +void qdisc_get_default(char *id, size_t len); +int qdisc_set_default(const char *id); + +void qdisc_list_add(struct Qdisc *q); +void qdisc_list_del(struct Qdisc *q); +struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); +struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); +struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, + struct nlattr *tab); +void qdisc_put_rtab(struct qdisc_rate_table *tab); +void qdisc_put_stab(struct qdisc_size_table *tab); +void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); +int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, + struct net_device *dev, struct netdev_queue *txq, + spinlock_t *root_lock); + +void __qdisc_run(struct Qdisc *q); + +static inline void qdisc_run(struct Qdisc *q) { - if (!netif_queue_stopped(dev) && - !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) - __qdisc_run(dev); + if (qdisc_run_begin(q)) + 
__qdisc_run(q); } -extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, - struct tcf_result *res); -extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, +int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res); +int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res); /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. */ -static inline unsigned psched_mtu(const struct net_device *dev) +static inline unsigned int psched_mtu(const struct net_device *dev) { return dev->mtu + dev->hard_header_len; } diff --git a/include/net/protocol.h b/include/net/protocol.h index ad8c584233a..d6fcc1fcdb5 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -25,46 +25,56 @@ #define _PROTOCOL_H #include <linux/in6.h> -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#include <linux/skbuff.h> +#if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif +#include <linux/netdevice.h> -#define MAX_INET_PROTOS 256 /* Must be a power of 2 */ - +/* This is one larger than the largest protocol value that can be + * found in an ipv4 or ipv6 header. Since in both cases the protocol + * value is presented in a __u8, this is defined to be 256. + */ +#define MAX_INET_PROTOS 256 /* This is used to register protocols. */ struct net_protocol { + void (*early_demux)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, u32 info); - int (*gso_send_check)(struct sk_buff *skb); - struct sk_buff *(*gso_segment)(struct sk_buff *skb, - int features); - int no_policy; + unsigned int no_policy:1, + netns_ok:1, + /* does the protocol do more stringent + * icmp tag validation than simple + * socket lookup? + */ + icmp_strict_tag_validation:1; }; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) -struct inet6_protocol -{ +#if IS_ENABLED(CONFIG_IPV6) +struct inet6_protocol { + void (*early_demux)(struct sk_buff *skb); + int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, - int type, int code, int offset, + u8 type, u8 code, int offset, __be32 info); - - int (*gso_send_check)(struct sk_buff *skb); - struct sk_buff *(*gso_segment)(struct sk_buff *skb, - int features); - unsigned int flags; /* INET6_PROTO_xxx */ }; #define INET6_PROTO_NOPOLICY 0x1 #define INET6_PROTO_FINAL 0x2 -/* This should be set for any extension header which is compatible with GSO. */ -#define INET6_PROTO_GSO_EXTHDR 0x4 #endif +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; /* Flags used by IPv6 for now */ +}; +/* This should be set for any extension header which is compatible with GSO. */ +#define INET6_PROTO_GSO_EXTHDR 0x1 + /* This is used to register socket interfaces for IP protocols. */ struct inet_protosw { struct list_head list; @@ -76,34 +86,37 @@ struct inet_protosw { struct proto *prot; const struct proto_ops *ops; - int capability; /* Which (if any) capability do - * we need to use this socket - * interface? - */ - char no_check; /* checksum on rcv/xmit/none? */ unsigned char flags; /* See INET_PROTOSW_* below. */ }; #define INET_PROTOSW_REUSE 0x01 /* Are ports automatically reusable? */ #define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */ #define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? 
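For orientation, this is roughly how a receive handler plugs into the reshaped struct net_protocol above. The callbacks and the protocol number (253, one of the IANA experimentation values) are illustrative only; this is a sketch, not code from the patch:

#include <linux/skbuff.h>
#include <net/protocol.h>

static int example_rcv(struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real handler would deliver the packet */
        return 0;
}

static void example_err(struct sk_buff *skb, u32 info)
{
        /* ICMP errors reported against this protocol land here */
}

static const struct net_protocol example_protocol = {
        .handler        = example_rcv,
        .err_handler    = example_err,
        .no_policy      = 1,
        .netns_ok       = 1,
};

/* typically wired up from an __init function with:
 *      inet_add_protocol(&example_protocol, 253);
 */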
*/ -extern struct net_protocol *inet_protocol_base; -extern struct net_protocol *inet_protos[MAX_INET_PROTOS]; +extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS]; +extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS]; +extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS]; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) -extern struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; +#if IS_ENABLED(CONFIG_IPV6) +extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS]; #endif -extern int inet_add_protocol(struct net_protocol *prot, unsigned char num); -extern int inet_del_protocol(struct net_protocol *prot, unsigned char num); -extern void inet_register_protosw(struct inet_protosw *p); -extern void inet_unregister_protosw(struct inet_protosw *p); - -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) -extern int inet6_add_protocol(struct inet6_protocol *prot, unsigned char num); -extern int inet6_del_protocol(struct inet6_protocol *prot, unsigned char num); -extern int inet6_register_protosw(struct inet_protosw *p); -extern void inet6_unregister_protosw(struct inet_protosw *p); +int inet_add_protocol(const struct net_protocol *prot, unsigned char num); +int inet_del_protocol(const struct net_protocol *prot, unsigned char num); +int inet_add_offload(const struct net_offload *prot, unsigned char num); +int inet_del_offload(const struct net_offload *prot, unsigned char num); +void inet_register_protosw(struct inet_protosw *p); +void inet_unregister_protosw(struct inet_protosw *p); + +int udp_add_offload(struct udp_offload *prot); +void udp_del_offload(struct udp_offload *prot); + +#if IS_ENABLED(CONFIG_IPV6) +int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num); +int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num); +int inet6_register_protosw(struct inet_protosw *p); +void inet6_unregister_protosw(struct inet_protosw *p); #endif +int inet6_add_offload(const struct net_offload *prot, unsigned char num); +int inet6_del_offload(const struct net_offload *prot, unsigned char num); #endif /* _PROTOCOL_H */ diff --git a/include/net/psnap.h b/include/net/psnap.h index b2e01cc3fc8..78db4cc1306 100644 --- a/include/net/psnap.h +++ b/include/net/psnap.h @@ -1,7 +1,11 @@ #ifndef _NET_PSNAP_H #define _NET_PSNAP_H -extern struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *orig_dev)); -extern void unregister_snap_client(struct datalink_proto *proto); +struct datalink_proto * +register_snap_client(const unsigned char *desc, + int (*rcvfunc)(struct sk_buff *, struct net_device *, + struct packet_type *, + struct net_device *orig_dev)); +void unregister_snap_client(struct datalink_proto *proto); #endif diff --git a/include/net/raw.h b/include/net/raw.h index 1828f81fe37..6a40c6562dd 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -19,13 +19,14 @@ #include <net/protocol.h> +#include <linux/icmp.h> extern struct proto raw_prot; void raw_icmp_error(struct sk_buff *, int, u32); int raw_local_deliver(struct sk_buff *, int); -extern int raw_rcv(struct sock *, struct sk_buff *); +int raw_rcv(struct sock *, struct sk_buff *); #define RAW_HTABLE_SIZE MAX_INET_PROTOS @@ -35,8 +36,8 @@ struct raw_hashinfo { }; #ifdef CONFIG_PROC_FS -extern int raw_proc_init(void); -extern void raw_proc_exit(void); +int raw_proc_init(void); +void raw_proc_exit(void); struct raw_iter_state { 
struct seq_net_private p; @@ -44,7 +45,10 @@ struct raw_iter_state { struct raw_hashinfo *h; }; -#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private) +static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq) +{ + return seq->private; +} void *raw_seq_start(struct seq_file *seq, loff_t *pos); void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos); void raw_seq_stop(struct seq_file *seq, void *v); @@ -53,7 +57,19 @@ int raw_seq_open(struct inode *ino, struct file *file, #endif -void raw_hash_sk(struct sock *sk, struct raw_hashinfo *h); -void raw_unhash_sk(struct sock *sk, struct raw_hashinfo *h); +void raw_hash_sk(struct sock *sk); +void raw_unhash_sk(struct sock *sk); + +struct raw_sock { + /* inet_sock has to be the first member */ + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +static inline struct raw_sock *raw_sk(const struct sock *sk) +{ + return (struct raw_sock *)sk; +} #endif /* _RAW_H */ diff --git a/include/net/rawv6.h b/include/net/rawv6.h index 8a22599f26b..87783dea079 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -1,16 +1,13 @@ #ifndef _NET_RAWV6_H #define _NET_RAWV6_H -#ifdef __KERNEL__ - #include <net/protocol.h> void raw6_icmp_error(struct sk_buff *, int nexthdr, - int type, int code, int inner_offset, __be32); -int raw6_local_deliver(struct sk_buff *, int); + u8 type, u8 code, int inner_offset, __be32); +bool raw6_local_deliver(struct sk_buff *, int); -extern int rawv6_rcv(struct sock *sk, - struct sk_buff *skb); +int rawv6_rcv(struct sock *sk, struct sk_buff *skb); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) int rawv6_mh_filter_register(int (*filter)(struct sock *sock, @@ -20,5 +17,3 @@ int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock, #endif #endif - -#endif diff --git a/include/net/red.h b/include/net/red.h index 3cf31d466a8..76e0b5f922c 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -2,9 +2,11 @@ #define __NET_SCHED_RED_H #include <linux/types.h> +#include <linux/bug.h> #include <net/pkt_sched.h> #include <net/inet_ecn.h> #include <net/dsfield.h> +#include <linux/reciprocal_div.h> /* Random Early Detection (RED) algorithm. ======================================= @@ -87,99 +89,151 @@ etc. */ +/* + * Adaptative RED : An Algorithm for Increasing the Robustness of RED's AQM + * (Sally FLoyd, Ramakrishna Gummadi, and Scott Shenker) August 2001 + * + * Every 500 ms: + * if (avg > target and max_p <= 0.5) + * increase max_p : max_p += alpha; + * else if (avg < target and max_p >= 0.01) + * decrease max_p : max_p *= beta; + * + * target :[qth_min + 0.4*(qth_min - qth_max), + * qth_min + 0.6*(qth_min - qth_max)]. 
+ * alpha : min(0.01, max_p / 4) + * beta : 0.9 + * max_P is a Q0.32 fixed point number (with 32 bits mantissa) + * max_P between 0.01 and 0.5 (1% - 50%) [ Its no longer a negative power of two ] + */ +#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100)) + +#define MAX_P_MIN (1 * RED_ONE_PERCENT) +#define MAX_P_MAX (50 * RED_ONE_PERCENT) +#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4) + #define RED_STAB_SIZE 256 #define RED_STAB_MASK (RED_STAB_SIZE - 1) -struct red_stats -{ +struct red_stats { u32 prob_drop; /* Early probability drops */ u32 prob_mark; /* Early probability marks */ u32 forced_drop; /* Forced drops, qavg > max_thresh */ u32 forced_mark; /* Forced marks, qavg > max_thresh */ u32 pdrop; /* Drops due to queue limits */ u32 other; /* Drops due to drop() calls */ - u32 backlog; }; -struct red_parms -{ +struct red_parms { /* Parameters */ - u32 qth_min; /* Min avg length threshold: A scaled */ - u32 qth_max; /* Max avg length threshold: A scaled */ + u32 qth_min; /* Min avg length threshold: Wlog scaled */ + u32 qth_max; /* Max avg length threshold: Wlog scaled */ u32 Scell_max; - u32 Rmask; /* Cached random mask, see red_rmask */ + u32 max_P; /* probability, [0 .. 1.0] 32 scaled */ + /* reciprocal_value(max_P / qth_delta) */ + struct reciprocal_value max_P_reciprocal; + u32 qth_delta; /* max_th - min_th */ + u32 target_min; /* min_th + 0.4*(max_th - min_th) */ + u32 target_max; /* min_th + 0.6*(max_th - min_th) */ u8 Scell_log; u8 Wlog; /* log(W) */ u8 Plog; /* random number bits */ u8 Stab[RED_STAB_SIZE]; +}; +struct red_vars { /* Variables */ int qcount; /* Number of packets since last random number generation */ u32 qR; /* Cached random number */ - unsigned long qavg; /* Average queue length: A scaled */ - psched_time_t qidlestart; /* Start of current idle period */ + unsigned long qavg; /* Average queue length: Wlog scaled */ + ktime_t qidlestart; /* Start of current idle period */ }; -static inline u32 red_rmask(u8 Plog) +static inline u32 red_maxp(u8 Plog) { - return Plog < 32 ? ((1 << Plog) - 1) : ~0UL; + return Plog < 32 ? (~0U >> Plog) : ~0U; } -static inline void red_set_parms(struct red_parms *p, - u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, - u8 Scell_log, u8 *stab) +static inline void red_set_vars(struct red_vars *v) { /* Reset average queue length, the value is strictly bound * to the parameters below, reseting hurts a bit but leaving * it might result in an unreasonable qavg for a while. --TGR */ - p->qavg = 0; + v->qavg = 0; + + v->qcount = -1; +} + +static inline void red_set_parms(struct red_parms *p, + u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, + u8 Scell_log, u8 *stab, u32 max_P) +{ + int delta = qth_max - qth_min; + u32 max_p_delta; - p->qcount = -1; p->qth_min = qth_min << Wlog; p->qth_max = qth_max << Wlog; p->Wlog = Wlog; p->Plog = Plog; - p->Rmask = red_rmask(Plog); + if (delta < 0) + delta = 1; + p->qth_delta = delta; + if (!max_P) { + max_P = red_maxp(Plog); + max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */ + } + p->max_P = max_P; + max_p_delta = max_P / delta; + max_p_delta = max(max_p_delta, 1U); + p->max_P_reciprocal = reciprocal_value(max_p_delta); + + /* RED Adaptative target : + * [min_th + 0.4*(min_th - max_th), + * min_th + 0.6*(min_th - max_th)]. 
+ */ + delta /= 5; + p->target_min = qth_min + 2*delta; + p->target_max = qth_min + 3*delta; + p->Scell_log = Scell_log; p->Scell_max = (255 << Scell_log); - memcpy(p->Stab, stab, sizeof(p->Stab)); + if (stab) + memcpy(p->Stab, stab, sizeof(p->Stab)); } -static inline int red_is_idling(struct red_parms *p) +static inline int red_is_idling(const struct red_vars *v) { - return p->qidlestart != PSCHED_PASTPERFECT; + return v->qidlestart.tv64 != 0; } -static inline void red_start_of_idle_period(struct red_parms *p) +static inline void red_start_of_idle_period(struct red_vars *v) { - p->qidlestart = psched_get_time(); + v->qidlestart = ktime_get(); } -static inline void red_end_of_idle_period(struct red_parms *p) +static inline void red_end_of_idle_period(struct red_vars *v) { - p->qidlestart = PSCHED_PASTPERFECT; + v->qidlestart.tv64 = 0; } -static inline void red_restart(struct red_parms *p) +static inline void red_restart(struct red_vars *v) { - red_end_of_idle_period(p); - p->qavg = 0; - p->qcount = -1; + red_end_of_idle_period(v); + v->qavg = 0; + v->qcount = -1; } -static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) +static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p, + const struct red_vars *v) { - psched_time_t now; - long us_idle; + s64 delta = ktime_us_delta(ktime_get(), v->qidlestart); + long us_idle = min_t(s64, delta, p->Scell_max); int shift; - now = psched_get_time(); - us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max); - /* * The problem: ideally, average length queue recalcultion should * be done over constant clock intervals. This is too expensive, so @@ -192,7 +246,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) * * dummy packets as a burst after idle time, i.e. * - * p->qavg *= (1-W)^m + * v->qavg *= (1-W)^m * * This is an apparently overcomplicated solution (f.e. we have to * precompute a table to make this calculation in reasonable time) @@ -203,7 +257,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK]; if (shift) - return p->qavg >> shift; + return v->qavg >> shift; else { /* Approximate initial part of exponent with linear function: * @@ -212,20 +266,21 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) * Seems, it is the best solution to * problem of too coarse exponent tabulation. */ - us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; + us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log; - if (us_idle < (p->qavg >> 1)) - return p->qavg - us_idle; + if (us_idle < (v->qavg >> 1)) + return v->qavg - us_idle; else - return p->qavg >> 1; + return v->qavg >> 1; } } -static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p, +static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p, + const struct red_vars *v, unsigned int backlog) { /* - * NOTE: p->qavg is fixed point number with point at Wlog. + * NOTE: v->qavg is fixed point number with point at Wlog. 
* The formula below is equvalent to floating point * version: * @@ -233,42 +288,46 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p, * * --ANK (980924) */ - return p->qavg + (backlog - (p->qavg >> p->Wlog)); + return v->qavg + (backlog - (v->qavg >> p->Wlog)); } -static inline unsigned long red_calc_qavg(struct red_parms *p, +static inline unsigned long red_calc_qavg(const struct red_parms *p, + const struct red_vars *v, unsigned int backlog) { - if (!red_is_idling(p)) - return red_calc_qavg_no_idle_time(p, backlog); + if (!red_is_idling(v)) + return red_calc_qavg_no_idle_time(p, v, backlog); else - return red_calc_qavg_from_idle_time(p); + return red_calc_qavg_from_idle_time(p, v); } -static inline u32 red_random(struct red_parms *p) + +static inline u32 red_random(const struct red_parms *p) { - return net_random() & p->Rmask; + return reciprocal_divide(prandom_u32(), p->max_P_reciprocal); } -static inline int red_mark_probability(struct red_parms *p, unsigned long qavg) +static inline int red_mark_probability(const struct red_parms *p, + const struct red_vars *v, + unsigned long qavg) { /* The formula used below causes questions. - OK. qR is random number in the interval 0..Rmask + OK. qR is random number in the interval + (0..1/max_P)*(qth_max-qth_min) i.e. 0..(2^Plog). If we used floating point arithmetics, it would be: (2^Plog)*rnd_num, where rnd_num is less 1. Taking into account, that qavg have fixed - point at Wlog, and Plog is related to max_P by - max_P = (qth_max-qth_min)/2^Plog; two lines + point at Wlog, two lines below have the following floating point equivalent: max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount Any questions? --ANK (980924) */ - return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR); + return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR); } enum { @@ -277,7 +336,7 @@ enum { RED_ABOVE_MAX_TRESH, }; -static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg) +static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg) { if (qavg < p->qth_min) return RED_BELOW_MIN_THRESH; @@ -293,27 +352,29 @@ enum { RED_HARD_MARK, }; -static inline int red_action(struct red_parms *p, unsigned long qavg) +static inline int red_action(const struct red_parms *p, + struct red_vars *v, + unsigned long qavg) { switch (red_cmp_thresh(p, qavg)) { case RED_BELOW_MIN_THRESH: - p->qcount = -1; + v->qcount = -1; return RED_DONT_MARK; case RED_BETWEEN_TRESH: - if (++p->qcount) { - if (red_mark_probability(p, qavg)) { - p->qcount = 0; - p->qR = red_random(p); + if (++v->qcount) { + if (red_mark_probability(p, v, qavg)) { + v->qcount = 0; + v->qR = red_random(p); return RED_PROB_MARK; } } else - p->qR = red_random(p); + v->qR = red_random(p); return RED_DONT_MARK; case RED_ABOVE_MAX_TRESH: - p->qcount = -1; + v->qcount = -1; return RED_HARD_MARK; } @@ -321,4 +382,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg) return RED_DONT_MARK; } +static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v) +{ + unsigned long qavg; + u32 max_p_delta; + + qavg = v->qavg; + if (red_is_idling(v)) + qavg = red_calc_qavg_from_idle_time(p, v); + + /* v->qavg is fixed point number with point at Wlog */ + qavg >>= p->Wlog; + + if (qavg > p->target_max && p->max_P <= MAX_P_MAX) + p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */ + else if (qavg < p->target_min && p->max_P >= MAX_P_MIN) + p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */ + + max_p_delta = 
DIV_ROUND_CLOSEST(p->max_P, p->qth_delta); + max_p_delta = max(max_p_delta, 1U); + p->max_P_reciprocal = reciprocal_value(max_p_delta); +} #endif diff --git a/include/net/regulatory.h b/include/net/regulatory.h new file mode 100644 index 00000000000..259992444e8 --- /dev/null +++ b/include/net/regulatory.h @@ -0,0 +1,196 @@ +#ifndef __NET_REGULATORY_H +#define __NET_REGULATORY_H +/* + * regulatory support structures + * + * Copyright 2008-2009 Luis R. Rodriguez <mcgrof@qca.qualcomm.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/rcupdate.h> + +/** + * enum environment_cap - Environment parsed from country IE + * @ENVIRON_ANY: indicates country IE applies to both indoor and + * outdoor operation. + * @ENVIRON_INDOOR: indicates country IE applies only to indoor operation + * @ENVIRON_OUTDOOR: indicates country IE applies only to outdoor operation + */ +enum environment_cap { + ENVIRON_ANY, + ENVIRON_INDOOR, + ENVIRON_OUTDOOR, +}; + +/** + * struct regulatory_request - used to keep track of regulatory requests + * + * @rcu_head: RCU head struct used to free the request + * @wiphy_idx: this is set if this request's initiator is + * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This + * can be used by the wireless core to deal with conflicts + * and potentially inform users of which devices specifically + * cased the conflicts. + * @initiator: indicates who sent this request, could be any of + * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) + * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested + * regulatory domain. We have a few special codes: + * 00 - World regulatory domain + * 99 - built by driver but a specific alpha2 cannot be determined + * 98 - result of an intersection between two regulatory domains + * 97 - regulatory domain has not yet been configured + * @dfs_region: If CRDA responded with a regulatory domain that requires + * DFS master operation on a known DFS region (NL80211_DFS_*), + * dfs_region represents that region. Drivers can use this and the + * @alpha2 to adjust their device's DFS parameters as required. + * @user_reg_hint_type: if the @initiator was of type + * %NL80211_REGDOM_SET_BY_USER, this classifies the type + * of hint passed. This could be any of the %NL80211_USER_REG_HINT_* + * types. + * @intersect: indicates whether the wireless core should intersect + * the requested regulatory domain with the presently set regulatory + * domain. + * @processed: indicates whether or not this requests has already been + * processed. When the last request is processed it means that the + * currently regulatory domain set on cfg80211 is updated from + * CRDA and can be used by other regulatory requests. When a + * the last request is not yet processed we must yield until it + * is processed before processing any new requests. 
+ * @country_ie_checksum: checksum of the last processed and accepted + * country IE + * @country_ie_env: lets us know if the AP is telling us we are outdoor, + * indoor, or if it doesn't matter + * @list: used to insert into the reg_requests_list linked list + */ +struct regulatory_request { + struct rcu_head rcu_head; + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; + char alpha2[2]; + enum nl80211_dfs_regions dfs_region; + bool intersect; + bool processed; + enum environment_cap country_ie_env; + struct list_head list; +}; + +/** + * enum ieee80211_regulatory_flags - device regulatory flags + * + * @REGULATORY_CUSTOM_REG: tells us the driver for this device + * has its own custom regulatory domain and cannot identify the + * ISO / IEC 3166 alpha2 it belongs to. When this is enabled + * we will disregard the first regulatory hint (when the + * initiator is %REGDOM_SET_BY_CORE). Drivers that use + * wiphy_apply_custom_regulatory() should have this flag set + * or the regulatory core will set it for the wiphy. + * If you use regulatory_hint() *after* using + * wiphy_apply_custom_regulatory() the wireless core will + * clear the REGULATORY_CUSTOM_REG for your wiphy as it would be + * implied that the device somehow gained knowledge of its region. + * @REGULATORY_STRICT_REG: tells us that the wiphy for this device + * has regulatory domain that it wishes to be considered as the + * superset for regulatory rules. After this device gets its regulatory + * domain programmed further regulatory hints shall only be considered + * for this device to enhance regulatory compliance, forcing the + * device to only possibly use subsets of the original regulatory + * rules. For example if channel 13 and 14 are disabled by this + * device's regulatory domain no user specified regulatory hint which + * has these channels enabled would enable them for this wiphy, + * the device's original regulatory domain will be trusted as the + * base. You can program the superset of regulatory rules for this + * wiphy with regulatory_hint() for cards programmed with an + * ISO3166-alpha2 country code. wiphys that use regulatory_hint() + * will have their wiphy->regd programmed once the regulatory + * domain is set, and all other regulatory hints will be ignored + * until their own regulatory domain gets programmed. + * @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to + * ensure that passive scan flags and beaconing flags may not be lifted by + * cfg80211 due to regulatory beacon hints. For more information on beacon + * hints read the documenation for regulatory_hint_found_beacon() + * @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference + * that even though they may have programmed their own custom power + * setting prior to wiphy registration, they want to ensure their channel + * power settings are updated for this connection with the power settings + * derived from the regulatory domain. The regulatory domain used will be + * based on the ISO3166-alpha2 from country IE provided through + * regulatory_hint_country_ie() + * @REGULATORY_COUNTRY_IE_IGNORE: for devices that have a preference to ignore + * all country IE information processed by the regulatory core. This will + * override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will + * be ignored. 
+ * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the + * NO_IR relaxation, which enables transmissions on channels on which + * otherwise initiating radiation is not allowed. This will enable the + * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration + * option + */ +enum ieee80211_regulatory_flags { + REGULATORY_CUSTOM_REG = BIT(0), + REGULATORY_STRICT_REG = BIT(1), + REGULATORY_DISABLE_BEACON_HINTS = BIT(2), + REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), + REGULATORY_COUNTRY_IE_IGNORE = BIT(4), + REGULATORY_ENABLE_RELAX_NO_IR = BIT(5), +}; + +struct ieee80211_freq_range { + u32 start_freq_khz; + u32 end_freq_khz; + u32 max_bandwidth_khz; +}; + +struct ieee80211_power_rule { + u32 max_antenna_gain; + u32 max_eirp; +}; + +struct ieee80211_reg_rule { + struct ieee80211_freq_range freq_range; + struct ieee80211_power_rule power_rule; + u32 flags; + u32 dfs_cac_ms; +}; + +struct ieee80211_regdomain { + struct rcu_head rcu_head; + u32 n_reg_rules; + char alpha2[2]; + enum nl80211_dfs_regions dfs_region; + struct ieee80211_reg_rule reg_rules[]; +}; + +#define MHZ_TO_KHZ(freq) ((freq) * 1000) +#define KHZ_TO_MHZ(freq) ((freq) / 1000) +#define DBI_TO_MBI(gain) ((gain) * 100) +#define MBI_TO_DBI(gain) ((gain) / 100) +#define DBM_TO_MBM(gain) ((gain) * 100) +#define MBM_TO_DBM(gain) ((gain) / 100) + +#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \ +{ \ + .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \ + .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \ + .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \ + .power_rule.max_antenna_gain = DBI_TO_MBI(gain), \ + .power_rule.max_eirp = DBM_TO_MBM(eirp), \ + .flags = reg_flags, \ + .dfs_cac_ms = dfs_cac, \ +} + +#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \ + REG_RULE_EXT(start, end, bw, gain, eirp, 0, reg_flags) + +#endif diff --git a/include/net/request_sock.h b/include/net/request_sock.h index cff4608179c..7f830ff67f0 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> +#include <linux/bug.h> #include <net/sock.h> @@ -30,23 +31,29 @@ struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; + char *slab_name; int (*rtx_syn_ack)(struct sock *sk, - struct request_sock *req, - struct dst_entry *dst); - void (*send_ack)(struct sk_buff *skb, + struct request_sock *req); + void (*send_ack)(struct sock *sk, struct sk_buff *skb, struct request_sock *req); void (*send_reset)(struct sock *sk, struct sk_buff *skb); void (*destructor)(struct request_sock *req); + void (*syn_ack_timeout)(struct sock *sk, + struct request_sock *req); }; +int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); + /* struct request_sock - mini sock to represent a connection request */ struct request_sock { - struct request_sock *dl_next; /* Must be first member! 
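The REG_RULE() helper above is how a driver with a fixed, custom regulatory domain usually spells out its rules (paired with REGULATORY_CUSTOM_REG and wiphy_apply_custom_regulatory()). A hedged sketch; the band edges, bandwidths and power values are made up for illustration, and NL80211_RRF_NO_IR comes from the nl80211 UAPI rather than this header:

#include <net/cfg80211.h>

static const struct ieee80211_regdomain example_regdom = {
        .n_reg_rules    = 2,
        .alpha2         = "99",         /* "built by driver" country code */
        .reg_rules      = {
                /* 2.4 GHz: channels 1-13, 40 MHz, 20 dBm EIRP, no flags */
                REG_RULE(2412 - 10, 2472 + 10, 40, 0, 20, 0),
                /* a 5 GHz slice where initiating radiation is not allowed */
                REG_RULE(5170 - 10, 5250 + 10, 80, 0, 23, NL80211_RRF_NO_IR),
        },
};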
*/ + struct sock_common __req_common; + struct request_sock *dl_next; u16 mss; - u8 retrans; - u8 __pad; + u8 num_retrans; /* number of retransmits */ + u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ + u8 num_timeout:7; /* number of timeouts */ /* The following two fields can be easily recomputed I think -AK */ u32 window_clamp; /* window clamp at creation time */ u32 rcv_wnd; /* rcv_wnd offered first time */ @@ -87,7 +94,8 @@ extern int sysctl_max_syn_backlog; */ struct listen_sock { u8 max_qlen_log; - /* 3 bytes hole, try to use */ + u8 synflood_warned; + /* 2 bytes hole, try to use */ int qlen; int qlen_young; int clock_hand; @@ -96,6 +104,34 @@ struct listen_sock { struct request_sock *syn_table[0]; }; +/* + * For a TCP Fast Open listener - + * lock - protects the access to all the reqsk, which is co-owned by + * the listener and the child socket. + * qlen - pending TFO requests (still in TCP_SYN_RECV). + * max_qlen - max TFO reqs allowed before TFO is disabled. + * + * XXX (TFO) - ideally these fields can be made as part of "listen_sock" + * structure above. But there is some implementation difficulty due to + * listen_sock being part of request_sock_queue hence will be freed when + * a listener is stopped. But TFO related fields may continue to be + * accessed even after a listener is closed, until its sk_refcnt drops + * to 0 implying no more outstanding TFO reqs. One solution is to keep + * listen_opt around until sk_refcnt drops to 0. But there is some other + * complexity that needs to be resolved. E.g., a listener can be disabled + * temporarily through shutdown()->tcp_disconnect(), and re-enabled later. + */ +struct fastopen_queue { + struct request_sock *rskq_rst_head; /* Keep track of past TFO */ + struct request_sock *rskq_rst_tail; /* requests that caused RST. + * This is part of the defense + * against spoofing attack. + */ + spinlock_t lock; + int qlen; /* # of pending (TCP_SYN_RECV) reqs */ + int max_qlen; /* != 0 iff TFO is currently enabled */ +}; + /** struct request_sock_queue - queue of request_socks * * @rskq_accept_head - FIFO head of established children @@ -119,13 +155,21 @@ struct request_sock_queue { u8 rskq_defer_accept; /* 3 bytes hole, try to pack */ struct listen_sock *listen_opt; + struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been + * enabled on this listener. Check + * max_qlen != 0 in fastopen_queue + * to determine if TFO is enabled + * right at this moment. 
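The comment above already spells out the test for whether Fast Open is live on a listener; as a tiny sketch (the helper name is illustrative, not an interface added by this patch):

static inline bool example_tfo_enabled(const struct request_sock_queue *queue)
{
        /* fastopenq is only allocated once TFO has been requested on the
         * listener, and max_qlen != 0 means it is enabled right now */
        return queue->fastopenq != NULL && queue->fastopenq->max_qlen != 0;
}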
+ */ }; -extern int reqsk_queue_alloc(struct request_sock_queue *queue, - unsigned int nr_table_entries); +int reqsk_queue_alloc(struct request_sock_queue *queue, + unsigned int nr_table_entries); -extern void __reqsk_queue_destroy(struct request_sock_queue *queue); -extern void reqsk_queue_destroy(struct request_sock_queue *queue); +void __reqsk_queue_destroy(struct request_sock_queue *queue); +void reqsk_queue_destroy(struct request_sock_queue *queue); +void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, + bool reset); static inline struct request_sock * reqsk_queue_yank_acceptq(struct request_sock_queue *queue) @@ -171,7 +215,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue { struct request_sock *req = queue->rskq_accept_head; - BUG_TRAP(req != NULL); + WARN_ON(req == NULL); queue->rskq_accept_head = req->dl_next; if (queue->rskq_accept_head == NULL) @@ -180,25 +224,12 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue return req; } -static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue, - struct sock *parent) -{ - struct request_sock *req = reqsk_queue_remove(queue); - struct sock *child = req->sk; - - BUG_TRAP(child != NULL); - - sk_acceptq_removed(parent); - __reqsk_free(req); - return child; -} - static inline int reqsk_queue_removed(struct request_sock_queue *queue, struct request_sock *req) { struct listen_sock *lopt = queue->listen_opt; - if (req->retrans == 0) + if (req->num_timeout == 0) --lopt->qlen_young; return --lopt->qlen; @@ -236,7 +267,8 @@ static inline void reqsk_queue_hash_req(struct request_sock_queue *queue, struct listen_sock *lopt = queue->listen_opt; req->expires = jiffies + timeout; - req->retrans = 0; + req->num_retrans = 0; + req->num_timeout = 0; req->sk = NULL; req->dl_next = lopt->syn_table[hash]; diff --git a/include/net/rose.h b/include/net/rose.h index e5bb084d875..50811fe2c58 100644 --- a/include/net/rose.h +++ b/include/net/rose.h @@ -14,6 +14,12 @@ #define ROSE_MIN_LEN 3 +#define ROSE_CALL_REQ_ADDR_LEN_OFF 3 +#define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */ +#define ROSE_CALL_REQ_DEST_ADDR_OFF 4 +#define ROSE_CALL_REQ_SRC_ADDR_OFF 9 +#define ROSE_CALL_REQ_FACILITIES_OFF 14 + #define ROSE_GFI 0x10 #define ROSE_Q_BIT 0x80 #define ROSE_D_BIT 0x40 @@ -154,38 +160,42 @@ extern int sysctl_rose_routing_control; extern int sysctl_rose_link_fail_timeout; extern int sysctl_rose_maximum_vcs; extern int sysctl_rose_window_size; -extern int rosecmp(rose_address *, rose_address *); -extern int rosecmpm(rose_address *, rose_address *, unsigned short); -extern const char *rose2asc(const rose_address *); -extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *); -extern void rose_kill_by_neigh(struct rose_neigh *); -extern unsigned int rose_new_lci(struct rose_neigh *); -extern int rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int); -extern void rose_destroy_socket(struct sock *); + +int rosecmp(rose_address *, rose_address *); +int rosecmpm(rose_address *, rose_address *, unsigned short); +char *rose2asc(char *buf, const rose_address *); +struct sock *rose_find_socket(unsigned int, struct rose_neigh *); +void rose_kill_by_neigh(struct rose_neigh *); +unsigned int rose_new_lci(struct rose_neigh *); +int rose_rx_call_request(struct sk_buff *, struct net_device *, + struct rose_neigh *, unsigned int); +void rose_destroy_socket(struct sock *); /* rose_dev.c */ -extern void 
rose_setup(struct net_device *); +void rose_setup(struct net_device *); /* rose_in.c */ -extern int rose_process_rx_frame(struct sock *, struct sk_buff *); +int rose_process_rx_frame(struct sock *, struct sk_buff *); /* rose_link.c */ -extern void rose_start_ftimer(struct rose_neigh *); -extern void rose_stop_ftimer(struct rose_neigh *); -extern void rose_stop_t0timer(struct rose_neigh *); -extern int rose_ftimer_running(struct rose_neigh *); -extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short); -extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char); -extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *); +void rose_start_ftimer(struct rose_neigh *); +void rose_stop_ftimer(struct rose_neigh *); +void rose_stop_t0timer(struct rose_neigh *); +int rose_ftimer_running(struct rose_neigh *); +void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, + unsigned short); +void rose_transmit_clear_request(struct rose_neigh *, unsigned int, + unsigned char, unsigned char); +void rose_transmit_link(struct sk_buff *, struct rose_neigh *); /* rose_loopback.c */ -extern void rose_loopback_init(void); -extern void rose_loopback_clear(void); -extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *); +void rose_loopback_init(void); +void rose_loopback_clear(void); +int rose_loopback_queue(struct sk_buff *, struct rose_neigh *); /* rose_out.c */ -extern void rose_kick(struct sock *); -extern void rose_enquiry_response(struct sock *); +void rose_kick(struct sock *); +void rose_enquiry_response(struct sock *); /* rose_route.c */ extern struct rose_neigh *rose_loopback_neigh; @@ -193,43 +203,45 @@ extern const struct file_operations rose_neigh_fops; extern const struct file_operations rose_nodes_fops; extern const struct file_operations rose_routes_fops; -extern void rose_add_loopback_neigh(void); -extern int __must_check rose_add_loopback_node(rose_address *); -extern void rose_del_loopback_node(rose_address *); -extern void rose_rt_device_down(struct net_device *); -extern void rose_link_device_down(struct net_device *); -extern struct net_device *rose_dev_first(void); -extern struct net_device *rose_dev_get(rose_address *); -extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *); -extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *); -extern int rose_rt_ioctl(unsigned int, void __user *); -extern void rose_link_failed(ax25_cb *, int); -extern int rose_route_frame(struct sk_buff *, ax25_cb *); -extern void rose_rt_free(void); +void rose_add_loopback_neigh(void); +int __must_check rose_add_loopback_node(rose_address *); +void rose_del_loopback_node(rose_address *); +void rose_rt_device_down(struct net_device *); +void rose_link_device_down(struct net_device *); +struct net_device *rose_dev_first(void); +struct net_device *rose_dev_get(rose_address *); +struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *); +struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, + unsigned char *, int); +int rose_rt_ioctl(unsigned int, void __user *); +void rose_link_failed(ax25_cb *, int); +int rose_route_frame(struct sk_buff *, ax25_cb *); +void rose_rt_free(void); /* rose_subr.c */ -extern void rose_clear_queues(struct sock *); -extern void rose_frames_acked(struct sock *, unsigned short); -extern void rose_requeue_frames(struct sock *); -extern int rose_validate_nr(struct sock *, unsigned short); -extern void 
rose_write_internal(struct sock *, int); -extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *); -extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *); -extern void rose_disconnect(struct sock *, int, int, int); +void rose_clear_queues(struct sock *); +void rose_frames_acked(struct sock *, unsigned short); +void rose_requeue_frames(struct sock *); +int rose_validate_nr(struct sock *, unsigned short); +void rose_write_internal(struct sock *, int); +int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *); +int rose_parse_facilities(unsigned char *, unsigned int, + struct rose_facilities_struct *); +void rose_disconnect(struct sock *, int, int, int); /* rose_timer.c */ -extern void rose_start_heartbeat(struct sock *); -extern void rose_start_t1timer(struct sock *); -extern void rose_start_t2timer(struct sock *); -extern void rose_start_t3timer(struct sock *); -extern void rose_start_hbtimer(struct sock *); -extern void rose_start_idletimer(struct sock *); -extern void rose_stop_heartbeat(struct sock *); -extern void rose_stop_timer(struct sock *); -extern void rose_stop_idletimer(struct sock *); +void rose_start_heartbeat(struct sock *); +void rose_start_t1timer(struct sock *); +void rose_start_t2timer(struct sock *); +void rose_start_t3timer(struct sock *); +void rose_start_hbtimer(struct sock *); +void rose_start_idletimer(struct sock *); +void rose_stop_heartbeat(struct sock *); +void rose_stop_timer(struct sock *); +void rose_stop_idletimer(struct sock *); /* sysctl_net_rose.c */ -extern void rose_register_sysctl(void); -extern void rose_unregister_sysctl(void); +void rose_register_sysctl(void); +void rose_unregister_sysctl(void); #endif diff --git a/include/net/route.h b/include/net/route.h index eadad590142..b17cf28f996 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -27,113 +27,182 @@ #include <net/dst.h> #include <net/inetpeer.h> #include <net/flow.h> -#include <net/sock.h> +#include <net/inet_sock.h> #include <linux/in_route.h> #include <linux/rtnetlink.h> +#include <linux/rcupdate.h> #include <linux/route.h> #include <linux/ip.h> #include <linux/cache.h> #include <linux/security.h> -#include <net/sock.h> -#ifndef __KERNEL__ -#warning This file is not supposed to be used outside of kernel. -#endif +/* IPv4 datagram length is stored into 16bit field (tot_len) */ +#define IP_MAX_MTU 0xFFFFU #define RTO_ONLINK 0x01 -#define RTO_CONN 0 -/* RTO_CONN is not used (being alias for 0), but preserved not to break - * some modules referring to it. 
*/ - #define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) +#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE)) struct fib_nh; -struct inet_peer; -struct rtable -{ - union - { - struct dst_entry dst; - } u; +struct fib_info; +struct rtable { + struct dst_entry dst; - /* Cache lookup keys */ - struct flowi fl; - - struct in_device *idev; - int rt_genid; - unsigned rt_flags; + unsigned int rt_flags; __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; - __be32 rt_dst; /* Path destination */ - __be32 rt_src; /* Path source */ int rt_iif; /* Info on neighbour */ __be32 rt_gateway; /* Miscellaneous cached information */ - __be32 rt_spec_dst; /* RFC1122 specific destination */ - struct inet_peer *peer; /* long-living peer info */ + u32 rt_pmtu; + + struct list_head rt_uncached; }; -struct ip_rt_acct +static inline bool rt_is_input_route(const struct rtable *rt) +{ + return rt->rt_is_input != 0; +} + +static inline bool rt_is_output_route(const struct rtable *rt) { + return rt->rt_is_input == 0; +} + +static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr) +{ + if (rt->rt_gateway) + return rt->rt_gateway; + return daddr; +} + +struct ip_rt_acct { __u32 o_bytes; __u32 o_packets; __u32 i_bytes; __u32 i_packets; }; -struct rt_cache_stat -{ - unsigned int in_hit; +struct rt_cache_stat { unsigned int in_slow_tot; unsigned int in_slow_mc; unsigned int in_no_route; unsigned int in_brd; unsigned int in_martian_dst; unsigned int in_martian_src; - unsigned int out_hit; unsigned int out_slow_tot; unsigned int out_slow_mc; - unsigned int gc_total; - unsigned int gc_ignored; - unsigned int gc_goal_miss; - unsigned int gc_dst_overflow; - unsigned int in_hlist_search; - unsigned int out_hlist_search; }; -extern struct ip_rt_acct *ip_rt_acct; +extern struct ip_rt_acct __percpu *ip_rt_acct; struct in_device; -extern int ip_rt_init(void); -extern void ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw, - __be32 src, struct net_device *dev); -extern void rt_cache_flush(int how); -extern int __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp); -extern int ip_route_output_key(struct net *, struct rtable **, struct flowi *flp); -extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags); -extern int ip_route_input(struct sk_buff*, __be32 dst, __be32 src, u8 tos, struct net_device *devin); -extern unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, unsigned short new_mtu); -extern void ip_rt_send_redirect(struct sk_buff *skb); - -extern unsigned inet_addr_type(struct net *net, __be32 addr); -extern unsigned inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); -extern void ip_rt_multicast_event(struct in_device *); -extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); -extern void ip_rt_get_source(u8 *src, struct rtable *rt); -extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb); + +int ip_rt_init(void); +void rt_cache_flush(struct net *net); +void rt_flush_dev(struct net_device *dev); +struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); +struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, + struct sock *sk); +struct dst_entry *ipv4_blackhole_route(struct net *net, + struct dst_entry *dst_orig); + +static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp) +{ + return ip_route_output_flow(net, flp, NULL); +} 
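A minimal caller sketch of the rtable-returning lookup API introduced in the hunk above (not part of this patch: example_output_lookup() is a hypothetical helper, and the IS_ERR()/PTR_ERR()/pr_debug() usage assumes the usual <linux/err.h> and <linux/printk.h> facilities are available):

static int example_output_lookup(struct net *net, __be32 daddr, __be32 saddr)
{
	struct flowi4 fl4 = {
		.daddr = daddr,
		.saddr = saddr,
	};
	struct rtable *rt;
	__be32 nh;

	/* the lookup now hands back the rtable itself; errors arrive as ERR_PTR() */
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* rt_nexthop() (added above) prefers rt_gateway and falls back to daddr */
	nh = rt_nexthop(rt, daddr);
	pr_debug("next hop %pI4\n", &nh);

	ip_rt_put(rt);
	return 0;
}
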
+ +static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, + __be32 saddr, u8 tos, int oif) +{ + struct flowi4 fl4 = { + .flowi4_oif = oif, + .flowi4_tos = tos, + .daddr = daddr, + .saddr = saddr, + }; + return ip_route_output_key(net, &fl4); +} + +static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4, + struct sock *sk, + __be32 daddr, __be32 saddr, + __be16 dport, __be16 sport, + __u8 proto, __u8 tos, int oif) +{ + flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, + RT_SCOPE_UNIVERSE, proto, + sk ? inet_sk_flowi_flags(sk) : 0, + daddr, saddr, dport, sport); + if (sk) + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + return ip_route_output_flow(net, fl4, sk); +} + +static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4, + __be32 daddr, __be32 saddr, + __be32 gre_key, __u8 tos, int oif) +{ + memset(fl4, 0, sizeof(*fl4)); + fl4->flowi4_oif = oif; + fl4->daddr = daddr; + fl4->saddr = saddr; + fl4->flowi4_tos = tos; + fl4->flowi4_proto = IPPROTO_GRE; + fl4->fl4_gre_key = gre_key; + return ip_route_output_key(net, fl4); +} + +int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, + u8 tos, struct net_device *devin); + +static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, + u8 tos, struct net_device *devin) +{ + int err; + + rcu_read_lock(); + err = ip_route_input_noref(skb, dst, src, tos, devin); + if (!err) + skb_dst_force(skb); + rcu_read_unlock(); + + return err; +} + +void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, + u32 mark, u8 protocol, int flow_flags); +void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu); +void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, + u8 protocol, int flow_flags); +void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk); +void ip_rt_send_redirect(struct sk_buff *skb); + +unsigned int inet_addr_type(struct net *net, __be32 addr); +unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, + __be32 addr); +void ip_rt_multicast_event(struct in_device *); +int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); +void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); struct in_ifaddr; -extern void fib_add_ifaddr(struct in_ifaddr *); +void fib_add_ifaddr(struct in_ifaddr *); +void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *); -static inline void ip_rt_put(struct rtable * rt) +static inline void ip_rt_put(struct rtable *rt) { - if (rt) - dst_release(&rt->u.dst); + /* dst_release() accepts a NULL parameter. + * We rely on dst being first structure in struct rtable + */ + BUILD_BUG_ON(offsetof(struct rtable, dst) != 0); + dst_release(&rt->dst); } #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3) @@ -145,66 +214,103 @@ static inline char rt_tos2priority(u8 tos) return ip_tos2prio[IPTOS_TOS(tos)>>1]; } -static inline int ip_route_connect(struct rtable **rp, __be32 dst, - __be32 src, u32 tos, int oif, u8 protocol, - __be16 sport, __be16 dport, struct sock *sk, - int flags) +/* ip_route_connect() and ip_route_newports() work in tandem whilst + * binding a socket for a new outgoing connection. + * + * In order to use IPSEC properly, we must, in the end, have a + * route that was looked up using all available keys including source + * and destination ports. 
+ * + * However, if a source port needs to be allocated (the user specified + * a wildcard source port) we need to obtain addressing information + * in order to perform that allocation. + * + * So ip_route_connect() looks up a route using wildcarded source and + * destination ports in the key, simply so that we can get a pair of + * addresses to use for port allocation. + * + * Later, once the ports are allocated, ip_route_newports() will make + * another route lookup if needed to make sure we catch any IPSEC + * rules keyed on the port information. + * + * The callers allocate the flow key on their stack, and must pass in + * the same flowi4 object to both the ip_route_connect() and the + * ip_route_newports() calls. + */ + +static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src, + u32 tos, int oif, u8 protocol, + __be16 sport, __be16 dport, + struct sock *sk) { - struct flowi fl = { .oif = oif, - .mark = sk->sk_mark, - .nl_u = { .ip4_u = { .daddr = dst, - .saddr = src, - .tos = tos } }, - .proto = protocol, - .uli_u = { .ports = - { .sport = sport, - .dport = dport } } }; + __u8 flow_flags = 0; + + if (inet_sk(sk)->transparent) + flow_flags |= FLOWI_FLAG_ANYSRC; + + flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, + protocol, flow_flags, dst, src, dport, sport); +} + +static inline struct rtable *ip_route_connect(struct flowi4 *fl4, + __be32 dst, __be32 src, u32 tos, + int oif, u8 protocol, + __be16 sport, __be16 dport, + struct sock *sk) +{ + struct net *net = sock_net(sk); + struct rtable *rt; + + ip_route_connect_init(fl4, dst, src, tos, oif, protocol, + sport, dport, sk); - int err; - struct net *net = sk->sk_net; if (!dst || !src) { - err = __ip_route_output_key(net, rp, &fl); - if (err) - return err; - fl.fl4_dst = (*rp)->rt_dst; - fl.fl4_src = (*rp)->rt_src; - ip_rt_put(*rp); - *rp = NULL; + rt = __ip_route_output_key(net, fl4); + if (IS_ERR(rt)) + return rt; + ip_rt_put(rt); + flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr); } - security_sk_classify_flow(sk, &fl); - return ip_route_output_flow(net, rp, &fl, sk, flags); + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + return ip_route_output_flow(net, fl4, sk); } -static inline int ip_route_newports(struct rtable **rp, u8 protocol, - __be16 sport, __be16 dport, struct sock *sk) +static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt, + __be16 orig_sport, __be16 orig_dport, + __be16 sport, __be16 dport, + struct sock *sk) { - if (sport != (*rp)->fl.fl_ip_sport || - dport != (*rp)->fl.fl_ip_dport) { - struct flowi fl; - - memcpy(&fl, &(*rp)->fl, sizeof(fl)); - fl.fl_ip_sport = sport; - fl.fl_ip_dport = dport; - fl.proto = protocol; - ip_rt_put(*rp); - *rp = NULL; - security_sk_classify_flow(sk, &fl); - return ip_route_output_flow(sk->sk_net, rp, &fl, sk, 0); + if (sport != orig_sport || dport != orig_dport) { + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; + ip_rt_put(rt); + flowi4_update_output(fl4, sk->sk_bound_dev_if, + RT_CONN_FLAGS(sk), fl4->daddr, + fl4->saddr); + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + return ip_route_output_flow(sock_net(sk), fl4, sk); } - return 0; + return rt; } -extern void rt_bind_peer(struct rtable *rt, int create); - -static inline struct inet_peer *rt_get_peer(struct rtable *rt) +static inline int inet_iif(const struct sk_buff *skb) { - if (rt->peer) - return rt->peer; + int iif = skb_rtable(skb)->rt_iif; - rt_bind_peer(rt, 0); - return rt->peer; + if (iif) + return iif; + return 
skb->skb_iif; } -extern ctl_table ipv4_route_table[]; +extern int sysctl_ip_default_ttl; + +static inline int ip4_dst_hoplimit(const struct dst_entry *dst) +{ + int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); + + if (hoplimit == 0) + hoplimit = sysctl_ip_default_ttl; + return hoplimit; +} #endif /* _ROUTE_H */ diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 793863e09c6..72240e5ac2c 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -4,17 +4,18 @@ #include <linux/rtnetlink.h> #include <net/netlink.h> -typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *); +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); +typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *); -extern int __rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func); -extern void rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func); -extern int rtnl_unregister(int protocol, int msgtype); -extern void rtnl_unregister_all(int protocol); +int __rtnl_register(int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func); +void rtnl_register(int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func); +int rtnl_unregister(int protocol, int msgtype); +void rtnl_unregister_all(int protocol); -static inline int rtnl_msg_family(struct nlmsghdr *nlh) +static inline int rtnl_msg_family(const struct nlmsghdr *nlh) { if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg)) return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family; @@ -38,9 +39,13 @@ static inline int rtnl_msg_family(struct nlmsghdr *nlh) * @get_size: Function to calculate required room for dumping device * specific netlink attributes * @fill_info: Function to dump device specific netlink attributes - * @get_xstats_size: Function to calculate required room for dumping devic + * @get_xstats_size: Function to calculate required room for dumping device * specific statistics * @fill_xstats: Function to dump device specific statistics + * @get_num_tx_queues: Function to determine number of transmit queues + * to create when creating a new device. + * @get_num_rx_queues: Function to determine number of receive queues + * to create when creating a new device. 
*/ struct rtnl_link_ops { struct list_head list; @@ -55,13 +60,15 @@ struct rtnl_link_ops { int (*validate)(struct nlattr *tb[], struct nlattr *data[]); - int (*newlink)(struct net_device *dev, + int (*newlink)(struct net *src_net, + struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]); int (*changelink)(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]); - void (*dellink)(struct net_device *dev); + void (*dellink)(struct net_device *dev, + struct list_head *head); size_t (*get_size)(const struct net_device *dev); int (*fill_info)(struct sk_buff *skb, @@ -70,17 +77,70 @@ struct rtnl_link_ops { size_t (*get_xstats_size)(const struct net_device *dev); int (*fill_xstats)(struct sk_buff *skb, const struct net_device *dev); + unsigned int (*get_num_tx_queues)(void); + unsigned int (*get_num_rx_queues)(void); + + int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_validate)(struct nlattr *tb[], + struct nlattr *data[]); + int (*slave_changelink)(struct net_device *dev, + struct net_device *slave_dev, + struct nlattr *tb[], + struct nlattr *data[]); + size_t (*get_slave_size)(const struct net_device *dev, + const struct net_device *slave_dev); + int (*fill_slave_info)(struct sk_buff *skb, + const struct net_device *dev, + const struct net_device *slave_dev); }; -extern int __rtnl_link_register(struct rtnl_link_ops *ops); -extern void __rtnl_link_unregister(struct rtnl_link_ops *ops); +int __rtnl_link_register(struct rtnl_link_ops *ops); +void __rtnl_link_unregister(struct rtnl_link_ops *ops); -extern int rtnl_link_register(struct rtnl_link_ops *ops); -extern void rtnl_link_unregister(struct rtnl_link_ops *ops); +int rtnl_link_register(struct rtnl_link_ops *ops); +void rtnl_link_unregister(struct rtnl_link_ops *ops); -extern struct net_device *rtnl_create_link(struct net *net, char *ifname, - const struct rtnl_link_ops *ops, struct nlattr *tb[]); -extern const struct nla_policy ifla_policy[IFLA_MAX+1]; +/** + * struct rtnl_af_ops - rtnetlink address family operations + * + * @list: Used internally + * @family: Address family + * @fill_link_af: Function to fill IFLA_AF_SPEC with address family + * specific netlink attributes. + * @get_link_af_size: Function to calculate size of address family specific + * netlink attributes. + * @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr + * for invalid configuration settings. + * @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify + * net_device accordingly. 
+ */ +struct rtnl_af_ops { + struct list_head list; + int family; + + int (*fill_link_af)(struct sk_buff *skb, + const struct net_device *dev); + size_t (*get_link_af_size)(const struct net_device *dev); + + int (*validate_link_af)(const struct net_device *dev, + const struct nlattr *attr); + int (*set_link_af)(struct net_device *dev, + const struct nlattr *attr); +}; + +void __rtnl_af_unregister(struct rtnl_af_ops *ops); + +void rtnl_af_register(struct rtnl_af_ops *ops); +void rtnl_af_unregister(struct rtnl_af_ops *ops); + +struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); +struct net_device *rtnl_create_link(struct net *net, char *ifname, + const struct rtnl_link_ops *ops, + struct nlattr *tb[]); +int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); + +int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index ab502ec1c61..624f9857c83 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -4,7 +4,6 @@ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/rcupdate.h> -#include <linux/module.h> #include <linux/pkt_sched.h> #include <linux/pkt_cls.h> #include <net/gen_stats.h> @@ -15,48 +14,121 @@ struct qdisc_walker; struct tcf_walker; struct module; -struct qdisc_rate_table -{ +struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; struct qdisc_rate_table *next; int refcnt; }; -struct Qdisc -{ +enum qdisc_state_t { + __QDISC_STATE_SCHED, + __QDISC_STATE_DEACTIVATED, + __QDISC_STATE_THROTTLED, +}; + +/* + * following bits are only changed while qdisc lock is held + */ +enum qdisc___state_t { + __QDISC___STATE_RUNNING = 1, +}; + +struct qdisc_size_table { + struct rcu_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[]; +}; + +struct Qdisc { int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); struct sk_buff * (*dequeue)(struct Qdisc *dev); - unsigned flags; -#define TCQ_F_BUILTIN 1 -#define TCQ_F_THROTTLED 2 -#define TCQ_F_INGRESS 4 - int padded; - struct Qdisc_ops *ops; + unsigned int flags; +#define TCQ_F_BUILTIN 1 +#define TCQ_F_INGRESS 2 +#define TCQ_F_CAN_BYPASS 4 +#define TCQ_F_MQROOT 8 +#define TCQ_F_ONETXQUEUE 0x10 /* dequeue_skb() can assume all skbs are for + * q->dev_queue : It can test + * netif_xmit_frozen_or_stopped() before + * dequeueing next packet. + * Its true for MQ/MQPRIO slaves, or non + * multiqueue device. + */ +#define TCQ_F_WARN_NONWC (1 << 16) + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table __rcu *stab; + struct list_head list; u32 handle; u32 parent; - atomic_t refcnt; - struct sk_buff_head q; - struct net_device *dev; - struct list_head list; - - struct gnet_stats_basic bstats; - struct gnet_stats_queue qstats; - struct gnet_stats_rate_est rate_est; - spinlock_t *stats_lock; - struct rcu_head q_rcu; int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q); + void *u32_node; + /* This field is deprecated, but it is still used by CBQ * and it will live until better solution will be invented. 
*/ struct Qdisc *__parent; + struct netdev_queue *dev_queue; + + struct gnet_stats_rate_est64 rate_est; + struct Qdisc *next_sched; + struct sk_buff *gso_skb; + /* + * For performance sake on SMP, we put highly modified fields at the end + */ + unsigned long state; + struct sk_buff_head q; + struct gnet_stats_basic_packed bstats; + unsigned int __state; + struct gnet_stats_queue qstats; + struct rcu_head rcu_head; + int padded; + atomic_t refcnt; + + spinlock_t busylock ____cacheline_aligned_in_smp; }; -struct Qdisc_class_ops +static inline bool qdisc_is_running(const struct Qdisc *qdisc) +{ + return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false; +} + +static inline bool qdisc_run_begin(struct Qdisc *qdisc) +{ + if (qdisc_is_running(qdisc)) + return false; + qdisc->__state |= __QDISC___STATE_RUNNING; + return true; +} + +static inline void qdisc_run_end(struct Qdisc *qdisc) +{ + qdisc->__state &= ~__QDISC___STATE_RUNNING; +} + +static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) { + return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false; +} + +static inline void qdisc_throttled(struct Qdisc *qdisc) +{ + set_bit(__QDISC_STATE_THROTTLED, &qdisc->state); +} + +static inline void qdisc_unthrottled(struct Qdisc *qdisc) +{ + clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state); +} + +struct Qdisc_class_ops { /* Child qdisc manipulation */ + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long cl, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl); @@ -83,8 +155,7 @@ struct Qdisc_class_ops struct gnet_dump *); }; -struct Qdisc_ops -{ +struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[IFNAMSIZ]; @@ -92,13 +163,14 @@ struct Qdisc_ops int (*enqueue)(struct sk_buff *, struct Qdisc *); struct sk_buff * (*dequeue)(struct Qdisc *); - int (*requeue)(struct sk_buff *, struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); unsigned int (*drop)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *arg); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *arg); + void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); @@ -107,44 +179,44 @@ struct Qdisc_ops }; -struct tcf_result -{ +struct tcf_result { unsigned long class; u32 classid; }; -struct tcf_proto_ops -{ - struct tcf_proto_ops *next; +struct tcf_proto_ops { + struct list_head head; char kind[IFNAMSIZ]; - int (*classify)(struct sk_buff*, struct tcf_proto*, - struct tcf_result *); + int (*classify)(struct sk_buff *, + const struct tcf_proto *, + struct tcf_result *); int (*init)(struct tcf_proto*); void (*destroy)(struct tcf_proto*); unsigned long (*get)(struct tcf_proto*, u32 handle); void (*put)(struct tcf_proto*, unsigned long); - int (*change)(struct tcf_proto*, unsigned long, + int (*change)(struct net *net, struct sk_buff *, + struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, - unsigned long *); + unsigned long *, bool); int (*delete)(struct tcf_proto*, unsigned long); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); /* rtnetlink specific */ - int (*dump)(struct tcf_proto*, unsigned long, + int (*dump)(struct net*, struct tcf_proto*, unsigned long, struct sk_buff *skb, struct tcmsg*); struct module *owner; }; -struct tcf_proto -{ +struct tcf_proto { /* Fast access part */ struct tcf_proto *next; void 
*root; - int (*classify)(struct sk_buff*, struct tcf_proto*, - struct tcf_result *); + int (*classify)(struct sk_buff *, + const struct tcf_proto *, + struct tcf_result *); __be16 protocol; /* All the rest */ @@ -152,41 +224,277 @@ struct tcf_proto u32 classid; struct Qdisc *q; void *data; - struct tcf_proto_ops *ops; + const struct tcf_proto_ops *ops; +}; + +struct qdisc_skb_cb { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 _pad; + unsigned char data[20]; }; +static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) +{ + struct qdisc_skb_cb *qcb; + + BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); + BUILD_BUG_ON(sizeof(qcb->data) < sz); +} + +static inline int qdisc_qlen(const struct Qdisc *q) +{ + return q->q.qlen; +} + +static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) +{ + return (struct qdisc_skb_cb *)skb->cb; +} + +static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc) +{ + return &qdisc->q.lock; +} + +static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) +{ + return qdisc->dev_queue->qdisc; +} + +static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) +{ + return qdisc->dev_queue->qdisc_sleeping; +} + +/* The qdisc root lock is a mechanism by which to top level + * of a qdisc tree can be locked from any qdisc node in the + * forest. This allows changing the configuration of some + * aspect of the qdisc tree while blocking out asynchronous + * qdisc access in the packet processing paths. + * + * It is only legal to do this when the root will not change + * on us. Otherwise we'll potentially lock the wrong qdisc + * root. This is enforced by holding the RTNL semaphore, which + * all users of this lock accessor must do. + */ +static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc) +{ + struct Qdisc *root = qdisc_root(qdisc); + + ASSERT_RTNL(); + return qdisc_lock(root); +} + +static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) +{ + struct Qdisc *root = qdisc_root_sleeping(qdisc); + + ASSERT_RTNL(); + return qdisc_lock(root); +} -extern void qdisc_lock_tree(struct net_device *dev); -extern void qdisc_unlock_tree(struct net_device *dev); +static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc) +{ + return qdisc->dev_queue->dev; +} -#define sch_tree_lock(q) qdisc_lock_tree((q)->dev) -#define sch_tree_unlock(q) qdisc_unlock_tree((q)->dev) -#define tcf_tree_lock(tp) qdisc_lock_tree((tp)->q->dev) -#define tcf_tree_unlock(tp) qdisc_unlock_tree((tp)->q->dev) +static inline void sch_tree_lock(const struct Qdisc *q) +{ + spin_lock_bh(qdisc_root_sleeping_lock(q)); +} + +static inline void sch_tree_unlock(const struct Qdisc *q) +{ + spin_unlock_bh(qdisc_root_sleeping_lock(q)); +} + +#define tcf_tree_lock(tp) sch_tree_lock((tp)->q) +#define tcf_tree_unlock(tp) sch_tree_unlock((tp)->q) extern struct Qdisc noop_qdisc; extern struct Qdisc_ops noop_qdisc_ops; +extern struct Qdisc_ops pfifo_fast_ops; +extern struct Qdisc_ops mq_qdisc_ops; +extern const struct Qdisc_ops *default_qdisc_ops; + +struct Qdisc_class_common { + u32 classid; + struct hlist_node hnode; +}; + +struct Qdisc_class_hash { + struct hlist_head *hash; + unsigned int hashsize; + unsigned int hashmask; + unsigned int hashelems; +}; + +static inline unsigned int qdisc_class_hash(u32 id, u32 mask) +{ + id ^= id >> 8; + id ^= id >> 4; + return id & mask; +} -extern void dev_init_scheduler(struct net_device *dev); -extern void dev_shutdown(struct net_device *dev); 
-extern void dev_activate(struct net_device *dev); -extern void dev_deactivate(struct net_device *dev); -extern void qdisc_reset(struct Qdisc *qdisc); -extern void qdisc_destroy(struct Qdisc *qdisc); -extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); -extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops); -extern struct Qdisc *qdisc_create_dflt(struct net_device *dev, - struct Qdisc_ops *ops, u32 parentid); -extern void tcf_destroy(struct tcf_proto *tp); -extern void tcf_destroy_chain(struct tcf_proto *fl); +static inline struct Qdisc_class_common * +qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) +{ + struct Qdisc_class_common *cl; + unsigned int h; + + h = qdisc_class_hash(id, hash->hashmask); + hlist_for_each_entry(cl, &hash->hash[h], hnode) { + if (cl->classid == id) + return cl; + } + return NULL; +} + +int qdisc_class_hash_init(struct Qdisc_class_hash *); +void qdisc_class_hash_insert(struct Qdisc_class_hash *, + struct Qdisc_class_common *); +void qdisc_class_hash_remove(struct Qdisc_class_hash *, + struct Qdisc_class_common *); +void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); +void qdisc_class_hash_destroy(struct Qdisc_class_hash *); + +void dev_init_scheduler(struct net_device *dev); +void dev_shutdown(struct net_device *dev); +void dev_activate(struct net_device *dev); +void dev_deactivate(struct net_device *dev); +void dev_deactivate_many(struct list_head *head); +struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, + struct Qdisc *qdisc); +void qdisc_reset(struct Qdisc *qdisc); +void qdisc_destroy(struct Qdisc *qdisc); +void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); +struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, + const struct Qdisc_ops *ops); +struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, + const struct Qdisc_ops *ops, u32 parentid); +void __qdisc_calculate_pkt_len(struct sk_buff *skb, + const struct qdisc_size_table *stab); +void tcf_destroy(struct tcf_proto *tp); +void tcf_destroy_chain(struct tcf_proto **fl); + +/* Reset all TX qdiscs greater then index of a device. */ +static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) +{ + struct Qdisc *qdisc; + + for (; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } +} + +static inline void qdisc_reset_all_tx(struct net_device *dev) +{ + qdisc_reset_all_tx_gt(dev, 0); +} + +/* Are all TX queues of the device empty? */ +static inline bool qdisc_all_tx_empty(const struct net_device *dev) +{ + unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + const struct Qdisc *q = txq->qdisc; + + if (q->q.qlen) + return false; + } + return true; +} + +/* Are any of the TX qdiscs changing? */ +static inline bool qdisc_tx_changing(const struct net_device *dev) +{ + unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + if (txq->qdisc != txq->qdisc_sleeping) + return true; + } + return false; +} + +/* Is the device using the noop qdisc on all queues? 
*/ +static inline bool qdisc_tx_is_noop(const struct net_device *dev) +{ + unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + if (txq->qdisc != &noop_qdisc) + return false; + } + return true; +} + +static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb) +{ + return qdisc_skb_cb(skb)->pkt_len; +} + +/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */ +enum net_xmit_qdisc_t { + __NET_XMIT_STOLEN = 0x00010000, + __NET_XMIT_BYPASS = 0x00020000, +}; + +#ifdef CONFIG_NET_CLS_ACT +#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1) +#else +#define net_xmit_drop_count(e) (1) +#endif + +static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, + const struct Qdisc *sch) +{ +#ifdef CONFIG_NET_SCHED + struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab); + + if (stab) + __qdisc_calculate_pkt_len(skb, stab); +#endif +} + +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + qdisc_calculate_pkt_len(skb, sch); + return sch->enqueue(skb, sch); +} + +static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) +{ + qdisc_skb_cb(skb)->pkt_len = skb->len; + return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; +} + + +static inline void bstats_update(struct gnet_stats_basic_packed *bstats, + const struct sk_buff *skb) +{ + bstats->bytes += qdisc_pkt_len(skb); + bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; +} + +static inline void qdisc_bstats_update(struct Qdisc *sch, + const struct sk_buff *skb) +{ + bstats_update(&sch->bstats, skb); +} static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff_head *list) { __skb_queue_tail(list, skb); - sch->qstats.backlog += skb->len; - sch->bstats.bytes += skb->len; - sch->bstats.packets++; + sch->qstats.backlog += qdisc_pkt_len(skb); return NET_XMIT_SUCCESS; } @@ -201,8 +509,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, { struct sk_buff *skb = __skb_dequeue(list); - if (likely(skb != NULL)) - sch->qstats.backlog -= skb->len; + if (likely(skb != NULL)) { + sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_bstats_update(sch, skb); + } return skb; } @@ -212,13 +522,33 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) return __qdisc_dequeue_head(sch, &sch->q); } +static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, + struct sk_buff_head *list) +{ + struct sk_buff *skb = __skb_dequeue(list); + + if (likely(skb != NULL)) { + unsigned int len = qdisc_pkt_len(skb); + sch->qstats.backlog -= len; + kfree_skb(skb); + return len; + } + + return 0; +} + +static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch) +{ + return __qdisc_queue_drop_head(sch, &sch->q); +} + static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, struct sk_buff_head *list) { struct sk_buff *skb = __skb_dequeue_tail(list); if (likely(skb != NULL)) - sch->qstats.backlog -= skb->len; + sch->qstats.backlog -= qdisc_pkt_len(skb); return skb; } @@ -228,19 +558,38 @@ static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch) return __qdisc_dequeue_tail(sch, &sch->q); } -static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch, - struct sk_buff_head *list) +static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) +{ + return skb_peek(&sch->q); +} + +/* generic pseudo peek method for non-work-conserving qdisc */ +static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc 
*sch) { - __skb_queue_head(list, skb); - sch->qstats.backlog += skb->len; - sch->qstats.requeues++; + /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ + if (!sch->gso_skb) { + sch->gso_skb = sch->dequeue(sch); + if (sch->gso_skb) + /* it's still part of the queue */ + sch->q.qlen++; + } - return NET_XMIT_SUCCESS; + return sch->gso_skb; } -static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch) +/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */ +static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) { - return __qdisc_requeue(skb, sch, &sch->q); + struct sk_buff *skb = sch->gso_skb; + + if (skb) { + sch->gso_skb = NULL; + sch->q.qlen--; + } else { + skb = sch->dequeue(sch); + } + + return skb; } static inline void __qdisc_reset_queue(struct Qdisc *sch, @@ -250,7 +599,7 @@ static inline void __qdisc_reset_queue(struct Qdisc *sch, * We do not know the backlog in bytes of this list, it * is up to the caller to correct it */ - skb_queue_purge(list); + __skb_queue_purge(list); } static inline void qdisc_reset_queue(struct Qdisc *sch) @@ -265,7 +614,7 @@ static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, struct sk_buff *skb = __qdisc_dequeue_tail(sch, list); if (likely(skb != NULL)) { - unsigned int len = skb->len; + unsigned int len = qdisc_pkt_len(skb); kfree_skb(skb); return len; } @@ -312,14 +661,17 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) slot = 0; slot >>= rtab->rate.cell_log; if (slot > 255) - return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]); + return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]; return rtab->data[slot]; } #ifdef CONFIG_NET_CLS_ACT -static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask) +static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, + int action) { - struct sk_buff *n = skb_clone(skb, gfp_mask); + struct sk_buff *n; + + n = skb_clone(skb, gfp_mask); if (n) { n->tc_verd = SET_TC_VERD(n->tc_verd, 0); @@ -330,4 +682,42 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask) } #endif +struct psched_ratecfg { + u64 rate_bytes_ps; /* bytes per second */ + u32 mult; + u16 overhead; + u8 linklayer; + u8 shift; +}; + +static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, + unsigned int len) +{ + len += r->overhead; + + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) + return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; + + return ((u64)len * r->mult) >> r->shift; +} + +void psched_ratecfg_precompute(struct psched_ratecfg *r, + const struct tc_ratespec *conf, + u64 rate64); + +static inline void psched_ratecfg_getrate(struct tc_ratespec *res, + const struct psched_ratecfg *r) +{ + memset(res, 0, sizeof(*res)); + + /* legacy struct tc_ratespec has a 32bit @rate field + * Qdisc using 64bit rate should add new attributes + * in order to maintain compatibility. 
+ */ + res->rate = min_t(u64, r->rate_bytes_ps, ~0U); + + res->overhead = r->overhead; + res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); +} + #endif diff --git a/include/net/scm.h b/include/net/scm.h index 06df126103c..262532d111f 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -10,29 +10,34 @@ /* Well, we should have at least one descriptor open * to accept passed FDs 8) */ -#define SCM_MAX_FD 255 +#define SCM_MAX_FD 253 -struct scm_fp_list -{ - int count; - struct file *fp[SCM_MAX_FD]; +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; }; -struct scm_cookie -{ - struct ucred creds; /* Skb credentials */ +struct scm_fp_list { + short count; + short max; + struct file *fp[SCM_MAX_FD]; +}; + +struct scm_cookie { + struct pid *pid; /* Skb credentials */ struct scm_fp_list *fp; /* Passed files */ + struct scm_creds creds; /* Skb credentials */ #ifdef CONFIG_SECURITY_NETWORK u32 secid; /* Passed security ID */ #endif - unsigned long seq; /* Connection seqno */ }; -extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm); -extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm); -extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); -extern void __scm_destroy(struct scm_cookie *scm); -extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl); +void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm); +void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm); +int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); +void __scm_destroy(struct scm_cookie *scm); +struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl); #ifdef CONFIG_SECURITY_NETWORK static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) @@ -44,21 +49,36 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co { } #endif /* CONFIG_SECURITY_NETWORK */ +static __inline__ void scm_set_cred(struct scm_cookie *scm, + struct pid *pid, kuid_t uid, kgid_t gid) +{ + scm->pid = get_pid(pid); + scm->creds.pid = pid_vnr(pid); + scm->creds.uid = uid; + scm->creds.gid = gid; +} + +static __inline__ void scm_destroy_cred(struct scm_cookie *scm) +{ + put_pid(scm->pid); + scm->pid = NULL; +} + static __inline__ void scm_destroy(struct scm_cookie *scm) { - if (scm && scm->fp) + scm_destroy_cred(scm); + if (scm->fp) __scm_destroy(scm); } static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm) + struct scm_cookie *scm, bool forcecreds) { - struct task_struct *p = current; - scm->creds.uid = p->uid; - scm->creds.gid = p->gid; - scm->creds.pid = task_tgid_vnr(p); - scm->fp = NULL; - scm->seq = 0; + memset(scm, 0, sizeof(*scm)); + scm->creds.uid = INVALID_UID; + scm->creds.gid = INVALID_GID; + if (forcecreds) + scm_set_cred(scm, task_tgid(current), current_uid(), current_gid()); unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) return 0; @@ -89,16 +109,24 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm, int flags) { - if (!msg->msg_control) - { + if (!msg->msg_control) { if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp) msg->msg_flags |= MSG_CTRUNC; scm_destroy(scm); return; } - if (test_bit(SOCK_PASSCRED, &sock->flags)) - put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); + if (test_bit(SOCK_PASSCRED, &sock->flags)) { + struct 
user_namespace *current_ns = current_user_ns(); + struct ucred ucreds = { + .pid = scm->creds.pid, + .uid = from_kuid_munged(current_ns, scm->creds.uid), + .gid = from_kgid_munged(current_ns, scm->creds.gid), + }; + put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); + } + + scm_destroy_cred(scm); scm_passec(sock, msg, scm); diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index 49bc9577c61..f2d58aa37a6 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h @@ -16,22 +16,15 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Vlad Yasevich <vladislav.yasevich@hp.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_auth_h__ diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index ba75c67cb99..4a5b9a306c6 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -19,16 +19,12 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Dinakaran Joseph @@ -37,42 +33,46 @@ * * Rewritten to use libcrc32c by: * Vlad Yasevich <vladislav.yasevich@hp.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ +#ifndef __sctp_checksum_h__ +#define __sctp_checksum_h__ + #include <linux/types.h> #include <net/sctp/sctp.h> #include <linux/crc32c.h> +#include <linux/crc32.h> -static inline __u32 sctp_start_cksum(__u8 *buffer, __u16 length) +static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum) { - __u32 crc = ~(__u32) 0; - __u8 zero[sizeof(__u32)] = {0}; - - /* Optimize this routine to be SCTP specific, knowing how - * to skip the checksum field of the SCTP header. + /* This uses the crypto implementation of crc32c, which is either + * implemented w/ hardware support or resolves to __crc32c_le(). */ - - /* Calculate CRC up to the checksum. */ - crc = crc32c(crc, buffer, sizeof(struct sctphdr) - sizeof(__u32)); - - /* Skip checksum field of the header. */ - crc = crc32c(crc, zero, sizeof(__u32)); - - /* Calculate the rest of the CRC. 
*/ - crc = crc32c(crc, &buffer[sizeof(struct sctphdr)], - length - sizeof(struct sctphdr)); - return crc; + return crc32c(sum, buff, len); } -static inline __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32) +static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2, + int offset, int len) { - return crc32c(crc32, buffer, length); + return __crc32c_le_combine(csum, csum2, len); } -static inline __u32 sctp_end_cksum(__u32 crc32) +static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, + unsigned int offset) { - return ntohl(~crc32); + struct sctphdr *sh = sctp_hdr(skb); + __le32 ret, old = sh->checksum; + const struct skb_checksum_ops ops = { + .update = sctp_csum_update, + .combine = sctp_csum_combine, + }; + + sh->checksum = 0; + ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset, + ~(__u32)0, &ops)); + sh->checksum = old; + + return ret; } + +#endif /* __sctp_checksum_h__ */ diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 10ae2da6f93..4b7cd695e43 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -19,23 +19,20 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * - * Please send any bug reports or fixes you make to one of the - * following email addresses: + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> * - * La Monte H.P. Yarroll <piggy@acm.org> - * Karl Knutson <karl@athena.chicago.il.us> - * Ardelle Fan <ardelle.fan@intel.com> - * Sridhar Samudrala <sri@us.ibm.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Ardelle Fan <ardelle.fan@intel.com> + * Sridhar Samudrala <sri@us.ibm.com> */ - #ifndef __net_sctp_command_h__ #define __net_sctp_command_h__ @@ -63,6 +60,7 @@ typedef enum { SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */ SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */ SCTP_CMD_TIMER_START, /* Start a timer. */ + SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */ SCTP_CMD_TIMER_RESTART, /* Restart a timer. */ SCTP_CMD_TIMER_STOP, /* Stop a timer. */ SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */ @@ -73,11 +71,11 @@ typedef enum { SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */ SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */ SCTP_CMD_STRIKE, /* Mark a strike against a transport. */ - SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */ SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */ SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timers. */ SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */ - SCTP_CMD_TRANSPORT_RESET, /* Reset the status of a transport. */ + SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */ + SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */ SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */ SCTP_CMD_REPORT_ERROR, /* Pass this error back out of the sm. */ SCTP_CMD_REPORT_BAD_TAG, /* Verification tags didn't match. 
*/ @@ -104,12 +102,14 @@ typedef enum { SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */ SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */ SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ + SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ + SCTP_CMD_SEND_MSG, /* Send the whole use message */ + SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ + SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ + SCTP_CMD_SET_ASOC, /* Restore association context */ SCTP_CMD_LAST } sctp_verb_t; -#define SCTP_CMD_MAX (SCTP_CMD_LAST - 1) -#define SCTP_CMD_NUM_VERBS (SCTP_CMD_MAX + 1) - /* How many commands can you put in an sctp_cmd_seq_t? * This is a rather arbitrary number, ideally derived from a careful * analysis of the state functions, but in reality just taken from @@ -127,8 +127,6 @@ typedef union { __be16 err; sctp_state_t state; sctp_event_timeout_t to; - unsigned long zero; - void *ptr; struct sctp_chunk *chunk; struct sctp_association *asoc; struct sctp_transport *transport; @@ -137,6 +135,7 @@ typedef union { struct sctp_ulpevent *ulpevent; struct sctp_packet *packet; sctp_sackhdr_t *sackh; + struct sctp_datamsg *msg; } sctp_arg_t; /* We are simulating ML type constructors here. @@ -150,23 +149,15 @@ typedef union { * which takes an __s32 and returns a sctp_arg_t containing the * __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg. */ -static inline sctp_arg_t SCTP_NULL(void) -{ - sctp_arg_t retval; retval.ptr = NULL; return retval; -} -static inline sctp_arg_t SCTP_NOFORCE(void) -{ - sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 0; return retval; -} -static inline sctp_arg_t SCTP_FORCE(void) -{ - sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 1; return retval; -} #define SCTP_ARG_CONSTRUCTOR(name, type, elt) \ static inline sctp_arg_t \ SCTP_## name (type arg) \ -{ sctp_arg_t retval = {.zero = 0UL}; retval.elt = arg; return retval; } +{ sctp_arg_t retval;\ + memset(&retval, 0, sizeof(sctp_arg_t));\ + retval.elt = arg;\ + return retval;\ +} SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) SCTP_ARG_CONSTRUCTOR(U32, __u32, u32) @@ -177,7 +168,6 @@ SCTP_ARG_CONSTRUCTOR(ERROR, int, error) SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */ SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state) SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to) -SCTP_ARG_CONSTRUCTOR(PTR, void *, ptr) SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk) SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc) SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport) @@ -186,6 +176,24 @@ SCTP_ARG_CONSTRUCTOR(PEER_INIT, sctp_init_chunk_t *, init) SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent) SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet) SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh) +SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg) + +static inline sctp_arg_t SCTP_FORCE(void) +{ + return SCTP_I32(1); +} + +static inline sctp_arg_t SCTP_NOFORCE(void) +{ + return SCTP_I32(0); +} + +static inline sctp_arg_t SCTP_NULL(void) +{ + sctp_arg_t retval; + memset(&retval, 0, sizeof(sctp_arg_t)); + return retval; +} typedef struct { sctp_arg_t obj; @@ -205,12 +213,11 @@ typedef struct { int sctp_init_cmd_seq(sctp_cmd_seq_t *seq); /* Add a command to an sctp_cmd_seq_t. - * Return 0 if the command sequence is full. * * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above * to wrap data which goes in the obj argument. 
*/ -int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj); +void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj); /* Return the next command structure in an sctp_cmd_seq. * Return NULL at the end of the sequence. diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index c32ddf0279c..307728f622e 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -19,16 +19,12 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> @@ -39,9 +35,6 @@ * Xingang Guo <xingang.guo@intel.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_constants_h__ @@ -49,7 +42,6 @@ #include <linux/sctp.h> #include <linux/ipv6.h> /* For ipv6hdr. */ -#include <net/sctp/user.h> #include <net/tcp_states.h> /* For TCP states used in sctp_sock_state_t */ /* Value used for stream negotiation. */ @@ -61,7 +53,6 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; * symbols. CIDs are dense through SCTP_CID_BASE_MAX. */ #define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE -#define SCTP_CID_MAX SCTP_CID_ASCONF_ACK #define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1) @@ -86,9 +77,6 @@ typedef enum { } sctp_event_t; -#define SCTP_EVENT_T_MAX SCTP_EVENT_T_PRIMITIVE -#define SCTP_EVENT_T_NUM (SCTP_EVENT_T_MAX + 1) - /* As a convenience for the state machine, we append SCTP_EVENT_* and * SCTP_ULP_* to the list of possible chunks. 
*/ @@ -154,7 +142,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, sctp_event_other_t, other) SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive) -#define sctp_chunk_is_control(a) (a->chunk_hdr->type != SCTP_CID_DATA) #define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA) /* Calculate the actual data size in a data chunk */ @@ -162,9 +149,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive) - (unsigned long)(c->chunk_hdr)\ - sizeof(sctp_data_chunk_t))) -#define SCTP_MAX_ERROR_CAUSE SCTP_ERROR_NONEXIST_IP -#define SCTP_NUM_ERROR_CAUSE 10 - /* Internal error codes */ typedef enum { @@ -195,15 +179,14 @@ typedef enum { /* SCTP state defines for internal state machine */ typedef enum { - SCTP_STATE_EMPTY = 0, - SCTP_STATE_CLOSED = 1, - SCTP_STATE_COOKIE_WAIT = 2, - SCTP_STATE_COOKIE_ECHOED = 3, - SCTP_STATE_ESTABLISHED = 4, - SCTP_STATE_SHUTDOWN_PENDING = 5, - SCTP_STATE_SHUTDOWN_SENT = 6, - SCTP_STATE_SHUTDOWN_RECEIVED = 7, - SCTP_STATE_SHUTDOWN_ACK_SENT = 8, + SCTP_STATE_CLOSED = 0, + SCTP_STATE_COOKIE_WAIT = 1, + SCTP_STATE_COOKIE_ECHOED = 2, + SCTP_STATE_ESTABLISHED = 3, + SCTP_STATE_SHUTDOWN_PENDING = 4, + SCTP_STATE_SHUTDOWN_SENT = 5, + SCTP_STATE_SHUTDOWN_RECEIVED = 6, + SCTP_STATE_SHUTDOWN_ACK_SENT = 7, } sctp_state_t; @@ -231,7 +214,7 @@ typedef enum { SCTP_SS_LISTENING = TCP_LISTEN, SCTP_SS_ESTABLISHING = TCP_SYN_SENT, SCTP_SS_ESTABLISHED = TCP_ESTABLISHED, - SCTP_SS_DISCONNECTING = TCP_CLOSING, + SCTP_SS_CLOSING = TCP_CLOSING, } sctp_sock_state_t; /* These functions map various type to printable names. */ @@ -241,7 +224,9 @@ const char *sctp_tname(const sctp_subtype_t); /* timeouts */ const char *sctp_pname(const sctp_subtype_t); /* primitives */ /* This is a table of printable names of sctp_state_t's. */ -extern const char *sctp_state_tbl[], *sctp_evttype_tbl[], *sctp_status_tbl[]; +extern const char *const sctp_state_tbl[]; +extern const char *const sctp_evttype_tbl[]; +extern const char *const sctp_status_tbl[]; /* Maximum chunk length considering padding requirements. */ enum { SCTP_MAX_CHUNK_LEN = ((1<<16) - sizeof(__u32)) }; @@ -261,8 +246,9 @@ enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 }; * must be less than 65535 (2^16 - 1), or we will have overflow * problems creating SACK's. */ -#define SCTP_TSN_MAP_SIZE 2048 -#define SCTP_TSN_MAX_GAP 65535 +#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG +#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL +#define SCTP_TSN_MAP_SIZE 4096 /* We will not record more than this many duplicate TSNs between two * SACKs. The minimum PMTU is 576. Remove all the headers and there @@ -297,21 +283,19 @@ enum { SCTP_MAX_GABS = 16 }; #define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */ -#define SCTP_DEF_MAX_INIT 6 -#define SCTP_DEF_MAX_SEND 10 - #define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */ #define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */ #define SCTP_DEFAULT_MAXWINDOW 65535 /* default rwnd size */ +#define SCTP_DEFAULT_RWND_SHIFT 4 /* by default, update on 1/16 of + * rcvbuf, which is 1/8 of initial + * window + */ #define SCTP_DEFAULT_MAXSEGMENT 1500 /* MTU size, this is the limit * to which we will raise the P-MTU. */ #define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */ -#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */ -#define SCTP_HOW_LONG_COOKIE_LIVE 3600 /* How many seconds the current - * secret will live? - */ + #define SCTP_SECRET_SIZE 32 /* Number of octets in a 256 bits. 
*/ #define SCTP_SIGNATURE_SIZE 20 /* size of a SLA-1 signature */ @@ -320,14 +304,6 @@ enum { SCTP_MAX_GABS = 16 }; * functions simpler to write. */ -#if defined (CONFIG_SCTP_HMAC_MD5) -#define SCTP_COOKIE_HMAC_ALG "hmac(md5)" -#elif defined (CONFIG_SCTP_HMAC_SHA1) -#define SCTP_COOKIE_HMAC_ALG "hmac(sha1)" -#else -#define SCTP_COOKIE_HMAC_ALG NULL -#endif - /* These return values describe the success or failure of a number of * routines which form the lower interface to SCTP_outqueue. */ @@ -342,6 +318,7 @@ typedef enum { typedef enum { SCTP_TRANSPORT_UP, SCTP_TRANSPORT_DOWN, + SCTP_TRANSPORT_PF, } sctp_transport_cmd_t; /* These are the address scopes defined mainly for IPv4 addresses @@ -359,6 +336,13 @@ typedef enum { SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */ } sctp_scope_t; +typedef enum { + SCTP_SCOPE_POLICY_DISABLE, /* Disable IPv4 address scoping */ + SCTP_SCOPE_POLICY_ENABLE, /* Enable IPv4 address scoping */ + SCTP_SCOPE_POLICY_PRIVATE, /* Follow draft but allow IPv4 private addresses */ + SCTP_SCOPE_POLICY_LINK, /* Follow draft but allow IPv4 link local addresses */ +} sctp_scope_policy_t; + /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>, * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, * 192.88.99.0/24. diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ea806732b08..8e4de46c052 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -21,16 +21,12 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> @@ -41,9 +37,6 @@ * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Kevin Gao <kevin.gao@intel.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __net_sctp_h__ @@ -62,13 +55,6 @@ * and will continue to evolve. */ - - -#ifdef TEST_FRAME -#undef CONFIG_SCTP_DBG_OBJCNT -#undef CONFIG_SYSCTL -#endif /* TEST_FRAME */ - #include <linux/types.h> #include <linux/slab.h> #include <linux/in.h> @@ -78,7 +64,7 @@ #include <linux/jiffies.h> #include <linux/idr.h> -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/ip6_route.h> #endif @@ -90,30 +76,12 @@ #include <net/sctp/structs.h> #include <net/sctp/constants.h> - -/* Set SCTP_DEBUG flag via config if not already set. */ -#ifndef SCTP_DEBUG -#ifdef CONFIG_SCTP_DBG_MSG -#define SCTP_DEBUG 1 -#else -#define SCTP_DEBUG 0 -#endif /* CONFIG_SCTP_DBG */ -#endif /* SCTP_DEBUG */ - #ifdef CONFIG_IP_SCTP_MODULE #define SCTP_PROTOSW_FLAG 0 #else /* static! */ #define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT #endif - -/* Certain internal static functions need to be exported when - * compiled into the test frame. - */ -#ifndef SCTP_STATIC -#define SCTP_STATIC static -#endif - /* * Function declarations. 
*/ @@ -121,13 +89,11 @@ /* * sctp/protocol.c */ -extern struct sock *sctp_get_ctl_sock(void); -extern void sctp_local_addr_free(struct rcu_head *head); -extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, - sctp_scope_t, gfp_t gfp, - int flags); -extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); -extern int sctp_register_pf(struct sctp_pf *, sa_family_t); +int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *, + sctp_scope_t, gfp_t gfp, int flags); +struct sctp_pf *sctp_get_pf_specific(sa_family_t family); +int sctp_register_pf(struct sctp_pf *, sa_family_t); +void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int); /* * sctp/socket.c @@ -135,19 +101,24 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t); int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); +void sctp_data_ready(struct sock *sk); unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait); void sctp_sock_rfree(struct sk_buff *skb); +void sctp_copy_sock(struct sock *newsk, struct sock *sk, + struct sctp_association *asoc); +extern struct percpu_counter sctp_sockets_allocated; +int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); /* * sctp/primitive.c */ -int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg); -int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg); -int sctp_primitive_ABORT(struct sctp_association *, void *arg); -int sctp_primitive_SEND(struct sctp_association *, void *arg); -int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg); -int sctp_primitive_ASCONF(struct sctp_association *, void *arg); +int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg); /* * sctp/input.c @@ -158,12 +129,14 @@ void sctp_hash_established(struct sctp_association *); void sctp_unhash_established(struct sctp_association *); void sctp_hash_endpoint(struct sctp_endpoint *); void sctp_unhash_endpoint(struct sctp_endpoint *); -struct sock *sctp_err_lookup(int family, struct sk_buff *, +struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, struct sctphdr *, struct sctp_association **, struct sctp_transport **); void sctp_err_finish(struct sock *, struct sctp_association *); void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu); +void sctp_icmp_redirect(struct sock *, struct sctp_transport *, + struct sk_buff *); void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t); @@ -173,12 +146,14 @@ void sctp_backlog_migrate(struct sctp_association *assoc, /* * sctp/proc.c */ -int sctp_snmp_proc_init(void); -void sctp_snmp_proc_exit(void); -int sctp_eps_proc_init(void); -void sctp_eps_proc_exit(void); -int sctp_assocs_proc_init(void); -void sctp_assocs_proc_exit(void); +int sctp_snmp_proc_init(struct net *net); +void sctp_snmp_proc_exit(struct net *net); +int sctp_eps_proc_init(struct net *net); +void sctp_eps_proc_exit(struct net *net); +int 
sctp_assocs_proc_init(struct net *net); +void sctp_assocs_proc_exit(struct net *net); +int sctp_remaddr_proc_init(struct net *net); +void sctp_remaddr_proc_exit(struct net *net); /* @@ -195,44 +170,14 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly; * Section: Macros, externs, and inlines */ - -#ifdef TEST_FRAME -#include <test_frame.h> -#else - -/* spin lock wrappers. */ -#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags) -#define sctp_spin_unlock_irqrestore(lock, flags) \ - spin_unlock_irqrestore(lock, flags) -#define sctp_local_bh_disable() local_bh_disable() -#define sctp_local_bh_enable() local_bh_enable() -#define sctp_spin_lock(lock) spin_lock(lock) -#define sctp_spin_unlock(lock) spin_unlock(lock) -#define sctp_write_lock(lock) write_lock(lock) -#define sctp_write_unlock(lock) write_unlock(lock) -#define sctp_read_lock(lock) read_lock(lock) -#define sctp_read_unlock(lock) read_unlock(lock) - -/* sock lock wrappers. */ -#define sctp_lock_sock(sk) lock_sock(sk) -#define sctp_release_sock(sk) release_sock(sk) -#define sctp_bh_lock_sock(sk) bh_lock_sock(sk) -#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk) -#define SCTP_SOCK_SLEEP_PRE(sk) SOCK_SLEEP_PRE(sk) -#define SCTP_SOCK_SLEEP_POST(sk) SOCK_SLEEP_POST(sk) - /* SCTP SNMP MIB stats handlers */ -DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics); -#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field) -#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field) -#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field) -#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field) - -#endif /* !TEST_FRAME */ +#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field) +#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field) +#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field) +#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) /* sctp mib definitions */ -enum -{ +enum { SCTP_MIB_NUM = 0, SCTP_MIB_CURRESTAB, /* CurrEstab */ SCTP_MIB_ACTIVEESTABS, /* ActiveEstabs */ @@ -273,50 +218,20 @@ enum #define SCTP_MIB_MAX __SCTP_MIB_MAX struct sctp_mib { unsigned long mibs[SCTP_MIB_MAX]; -} __SNMP_MIB_ALIGN__; - - -/* Print debugging messages. */ -#if SCTP_DEBUG -extern int sctp_debug_flag; -#define SCTP_DEBUG_PRINTK(whatever...) \ - ((void) (sctp_debug_flag && printk(KERN_DEBUG whatever))) -#define SCTP_DEBUG_PRINTK_IPADDR(lead, trail, leadparm, saddr, otherparms...) \ - if (sctp_debug_flag) { \ - if (saddr->sa.sa_family == AF_INET6) { \ - printk(KERN_DEBUG \ - lead NIP6_FMT trail, \ - leadparm, \ - NIP6(saddr->v6.sin6_addr), \ - otherparms); \ - } else { \ - printk(KERN_DEBUG \ - lead NIPQUAD_FMT trail, \ - leadparm, \ - NIPQUAD(saddr->v4.sin_addr.s_addr), \ - otherparms); \ - } \ - } -#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } -#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } - -#define SCTP_ASSERT(expr, str, func) \ - if (!(expr)) { \ - SCTP_DEBUG_PRINTK("Assertion Failed: %s(%s) at %s:%s:%d\n", \ - str, (#expr), __FILE__, __FUNCTION__, __LINE__); \ - func; \ - } - -#else /* SCTP_DEBUG */ - -#define SCTP_DEBUG_PRINTK(whatever...) -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) 
-#define SCTP_ENABLE_DEBUG -#define SCTP_DISABLE_DEBUG -#define SCTP_ASSERT(expr, str, func) - -#endif /* SCTP_DEBUG */ +}; +/* helper function to track stats about max rto and related transport */ +static inline void sctp_max_rto(struct sctp_association *asoc, + struct sctp_transport *trans) +{ + if (asoc->stats.max_obs_rto < (__u64)trans->rto) { + asoc->stats.max_obs_rto = trans->rto; + memset(&asoc->stats.obs_rto_ipaddr, 0, + sizeof(struct sockaddr_storage)); + memcpy(&asoc->stats.obs_rto_ipaddr, &trans->ipaddr, + trans->af_specific->sockaddr_len); + } +} /* * Macros for keeping a global reference of object allocations. @@ -349,36 +264,35 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0) #define SCTP_DBG_OBJCNT_ENTRY(name) \ {.label= #name, .counter= &sctp_dbg_objcnt_## name} -void sctp_dbg_objcnt_init(void); -void sctp_dbg_objcnt_exit(void); +void sctp_dbg_objcnt_init(struct net *); +void sctp_dbg_objcnt_exit(struct net *); #else #define SCTP_DBG_OBJCNT_INC(name) #define SCTP_DBG_OBJCNT_DEC(name) -static inline void sctp_dbg_objcnt_init(void) { return; } -static inline void sctp_dbg_objcnt_exit(void) { return; } +static inline void sctp_dbg_objcnt_init(struct net *net) { return; } +static inline void sctp_dbg_objcnt_exit(struct net *net) { return; } #endif /* CONFIG_SCTP_DBG_OBJCOUNT */ #if defined CONFIG_SYSCTL void sctp_sysctl_register(void); void sctp_sysctl_unregister(void); +int sctp_sysctl_net_register(struct net *net); +void sctp_sysctl_net_unregister(struct net *net); #else static inline void sctp_sysctl_register(void) { return; } static inline void sctp_sysctl_unregister(void) { return; } -static inline int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int nlen, - void __user *oldval, size_t __user *oldlenp, - void __user *newval, size_t newlen) { - return -ENOSYS; -} +static inline int sctp_sysctl_net_register(struct net *net) { return 0; } +static inline void sctp_sysctl_net_unregister(struct net *net) { return; } #endif /* Size of Supported Address Parameter for 'x' address types. */ #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16)) -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) void sctp_v6_pf_init(void); void sctp_v6_pf_exit(void); @@ -402,19 +316,17 @@ static inline void sctp_v6_del_protocol(void) { return; } /* Map an association to an assoc_id. */ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) { - return (asoc?asoc->assoc_id:0); + return asoc ? asoc->assoc_id : 0; } /* Look up the association by its id. */ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id); +int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp); /* A macro to walk a list of skbs. */ #define sctp_skb_for_each(pos, head, tmp) \ -for (pos = (head)->next;\ - tmp = (pos)->next, pos != ((struct sk_buff *)(head));\ - pos = tmp) - + skb_queue_walk_safe(head, pos, tmp) /* A helper to append an entire skb list (list) to another (head). 
*/ static inline void sctp_skb_list_tail(struct sk_buff_head *list, @@ -422,16 +334,13 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list, { unsigned long flags; - sctp_spin_lock_irqsave(&head->lock, flags); - sctp_spin_lock(&list->lock); - - list_splice((struct list_head *)list, (struct list_head *)head->prev); + spin_lock_irqsave(&head->lock, flags); + spin_lock(&list->lock); - head->qlen += list->qlen; - list->qlen = 0; + skb_queue_splice_tail_init(list, head); - sctp_spin_unlock(&list->lock); - sctp_spin_unlock_irqrestore(&head->lock, flags); + spin_unlock(&list->lock); + spin_unlock_irqrestore(&head->lock, flags); } /** @@ -463,6 +372,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { struct sctp_ulpevent *event = sctp_skb2event(skb); + skb_orphan(skb); skb->sk = sk; skb->destructor = sctp_sock_rfree; atomic_add(event->rmem_len, &sk->sk_rmem_alloc); @@ -475,7 +385,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) /* Tests if the list has one and only one entry. */ static inline int sctp_list_single_entry(struct list_head *head) { - return ((head->next != head) && (head->next == head->prev)); + return (head->next != head) && (head->next == head->prev); } /* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ @@ -500,25 +410,26 @@ static inline __s32 sctp_jitter(__u32 rto) } /* Break down data chunks at this point. */ -static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu) +static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) { + struct sctp_sock *sp = sctp_sk(asoc->base.sk); int frag = pmtu; frag -= sp->pf->af->net_header_len; frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); - if (sp->user_frag) - frag = min_t(int, frag, sp->user_frag); + if (asoc->user_frag) + frag = min_t(int, frag, asoc->user_frag); frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN); return frag; } -static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc) +static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) { - sctp_assoc_sync_pmtu(asoc); + sctp_assoc_sync_pmtu(sk, asoc); asoc->pmtu_pending = 0; } @@ -532,7 +443,6 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) #define _sctp_walk_params(pos, chunk, end, member)\ for (pos.v = chunk->member;\ - pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ pos.v += WORD_ROUND(ntohs(pos.p->length))) @@ -543,7 +453,6 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) #define _sctp_walk_errors(err, chunk_hdr, end)\ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ sizeof(sctp_chunkhdr_t));\ - (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ ntohs(err->length) >= sizeof(sctp_errhdr_t); \ err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) @@ -559,32 +468,10 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\ /* Round an int up to the next multiple of 4. */ #define WORD_ROUND(s) (((s)+3)&~3) -/* Make a new instance of type. */ -#define t_new(type, flags) (type *)kmalloc(sizeof(type), flags) - -/* Compare two timevals. */ -#define tv_lt(s, t) \ - (s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec)) - -/* Add tv1 to tv2. 
*/ -#define TIMEVAL_ADD(tv1, tv2) \ -({ \ - suseconds_t usecs = (tv2).tv_usec + (tv1).tv_usec; \ - time_t secs = (tv2).tv_sec + (tv1).tv_sec; \ -\ - if (usecs >= 1000000) { \ - usecs -= 1000000; \ - secs++; \ - } \ - (tv2).tv_sec = secs; \ - (tv2).tv_usec = usecs; \ -}) - /* External references. */ extern struct proto sctp_prot; extern struct proto sctpv6_prot; -extern struct proc_dir_entry *proc_net_sctp; void sctp_put_port(struct sock *sk); extern struct idr sctp_assocs_id; @@ -602,7 +489,7 @@ static inline int ipver2af(__u8 ipver) return AF_INET6; default: return 0; - }; + } } /* Convert from an address parameter type to an address family. */ @@ -615,38 +502,28 @@ static inline int param_type2af(__be16 type) return AF_INET6; default: return 0; - }; -} - -/* Perform some sanity checks. */ -static inline int sctp_sanity_check(void) -{ - SCTP_ASSERT(sizeof(struct sctp_ulpevent) <= - sizeof(((struct sk_buff *)0)->cb), - "SCTP: ulpevent does not fit in skb!\n", return 0); - - return 1; + } } /* Warning: The following hash functions assume a power of two 'size'. */ /* This is the hash function for the SCTP port hash table. */ -static inline int sctp_phashfn(__u16 lport) +static inline int sctp_phashfn(struct net *net, __u16 lport) { - return (lport & (sctp_port_hashsize - 1)); + return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1); } /* This is the hash function for the endpoint hash table. */ -static inline int sctp_ep_hashfn(__u16 lport) +static inline int sctp_ep_hashfn(struct net *net, __u16 lport) { - return (lport & (sctp_ep_hashsize - 1)); + return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1); } /* This is the hash function for the association hash table. */ -static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport) +static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport) { - int h = (lport << 16) + rport; + int h = (lport << 16) + rport + net_hash_mix(net); h ^= h>>8; - return (h & (sctp_assoc_hashsize - 1)); + return h & (sctp_assoc_hashsize - 1); } /* This is the hash function for the association hash table. This is @@ -657,11 +534,11 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag) { int h = (lport << 16) + rport; h ^= vtag; - return (h & (sctp_assoc_hashsize-1)); + return h & (sctp_assoc_hashsize - 1); } -#define sctp_for_each_hentry(epb, node, head) \ - hlist_for_each_entry(epb, node, head, node) +#define sctp_for_each_hentry(epb, head) \ + hlist_for_each_entry(epb, head, node) /* Is a socket of this style? */ #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style)) @@ -704,4 +581,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); } +/* The cookie is always 0 since this is how it's used in the + * pmtu code. + */ +static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) +{ + if (t->dst && !dst_check(t->dst, t->dst_cookie)) { + dst_release(t->dst); + t->dst = NULL; + } + + return t->dst; +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index ef9e7ed2c82..7f4eeb340a5 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -21,16 +21,12 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. 
+ * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email addresses: - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> @@ -42,9 +38,6 @@ * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include <linux/types.h> @@ -77,7 +70,8 @@ typedef struct { int action; } sctp_sm_command_t; -typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *, +typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, + const struct sctp_endpoint *, const struct sctp_association *, const sctp_subtype_t type, void *arg, @@ -125,6 +119,7 @@ sctp_state_fn_t sctp_sf_beat_8_3; sctp_state_fn_t sctp_sf_backbeat_8_3; sctp_state_fn_t sctp_sf_do_9_2_final; sctp_state_fn_t sctp_sf_do_9_2_shutdown; +sctp_state_fn_t sctp_sf_do_9_2_shut_ctsn; sctp_state_fn_t sctp_sf_do_ecn_cwr; sctp_state_fn_t sctp_sf_do_ecne; sctp_state_fn_t sctp_sf_ootb; @@ -164,6 +159,7 @@ sctp_state_fn_t sctp_sf_do_prm_requestheartbeat; sctp_state_fn_t sctp_sf_do_prm_asconf; /* Prototypes for other event state functions. */ +sctp_state_fn_t sctp_sf_do_no_pending_tsn; sctp_state_fn_t sctp_sf_do_9_2_start_shutdown; sctp_state_fn_t sctp_sf_do_9_2_shutdown_ack; sctp_state_fn_t sctp_sf_ignore_other; @@ -176,7 +172,8 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. */ __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); -const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t, +const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *, + sctp_event_t, sctp_state_t, sctp_subtype_t); int sctp_chunk_iif(const struct sctp_chunk *); @@ -227,10 +224,13 @@ struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, const size_t ); +struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *, + const struct sctp_chunk *, + struct sctp_paramhdr *); +struct sctp_chunk *sctp_make_violation_max_retrans(const struct sctp_association *, + const struct sctp_chunk *); struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, - const struct sctp_transport *, - const void *payload, - const size_t paylen); + const struct sctp_transport *); struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *, const struct sctp_chunk *, const void *payload, @@ -239,7 +239,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, - size_t paylen); + size_t paylen, + size_t reserve_tail); struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *, union sctp_addr *, @@ -264,7 +265,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *); /* Prototypes for statetable processing. 
*/ -int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, +int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *, struct sctp_association *asoc, @@ -274,6 +275,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); void sctp_generate_heartbeat_event(unsigned long peer); +void sctp_generate_proto_unreach_event(unsigned long peer); void sctp_ootb_pkt_free(struct sctp_packet *); @@ -339,12 +341,12 @@ enum { static inline int TSN_lt(__u32 s, __u32 t) { - return (((s) - (t)) & TSN_SIGN_BIT); + return ((s) - (t)) & TSN_SIGN_BIT; } static inline int TSN_lte(__u32 s, __u32 t) { - return (((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT)); + return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT); } /* Compare two SSNs */ @@ -363,12 +365,12 @@ enum { static inline int SSN_lt(__u16 s, __u16 t) { - return (((s) - (t)) & SSN_SIGN_BIT); + return ((s) - (t)) & SSN_SIGN_BIT; } static inline int SSN_lte(__u16 s, __u16 t) { - return (((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT)); + return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT); } /* @@ -382,15 +384,7 @@ enum { static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) { - return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT)); -} - - -/* Run sctp_add_cmd() generating a BUG() if there is a failure. */ -static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) -{ - if (unlikely(!sctp_add_cmd(seq, verb, obj))) - BUG(); + return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT); } /* Check VTAG of the packet matches the sender's own tag. */ @@ -440,7 +434,7 @@ sctp_vtag_verify_either(const struct sctp_chunk *chunk, */ if ((!sctp_test_T_bit(chunk) && (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) || - (sctp_test_T_bit(chunk) && + (sctp_test_T_bit(chunk) && asoc->c.peer_vtag && (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) { return 1; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 9c827a749b6..f38588bf346 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -19,16 +19,12 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email addresses: - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Randall Stewart <randall@sctp.chicago.il.us> @@ -46,21 +42,18 @@ * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_structs_h__ #define __sctp_structs_h__ -#include <linux/time.h> /* We get struct timespec. */ +#include <linux/ktime.h> #include <linux/socket.h> /* linux/in.h needs this!! */ #include <linux/in.h> /* We get struct sockaddr_in. 
*/ #include <linux/in6.h> /* We get struct in6_addr */ #include <linux/ipv6.h> #include <asm/param.h> /* We get MAXHOSTNAMELEN. */ -#include <asm/atomic.h> /* This gets us atomic counters. */ +#include <linux/atomic.h> /* This gets us atomic counters. */ #include <linux/skbuff.h> /* We need sk_buff_head. */ #include <linux/workqueue.h> /* We need tq_struct. */ #include <linux/sctp.h> /* We need sctp* header structs. */ @@ -102,6 +95,7 @@ struct sctp_bind_bucket { unsigned short fastreuse; struct hlist_node node; struct hlist_head owner; + struct net *net; }; struct sctp_bind_hashbucket { @@ -118,123 +112,32 @@ struct sctp_hashbucket { /* The SCTP globals structure. */ extern struct sctp_globals { - /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values - * - * The following protocol parameters are RECOMMENDED: - * - * RTO.Initial - 3 seconds - * RTO.Min - 1 second - * RTO.Max - 60 seconds - * RTO.Alpha - 1/8 (3 when converted to right shifts.) - * RTO.Beta - 1/4 (2 when converted to right shifts.) - */ - unsigned int rto_initial; - unsigned int rto_min; - unsigned int rto_max; - - /* Note: rto_alpha and rto_beta are really defined as inverse - * powers of two to facilitate integer operations. - */ - int rto_alpha; - int rto_beta; - - /* Max.Burst - 4 */ - int max_burst; - - /* Whether Cookie Preservative is enabled(1) or not(0) */ - int cookie_preserve_enable; - - /* Valid.Cookie.Life - 60 seconds */ - unsigned int valid_cookie_life; - - /* Delayed SACK timeout 200ms default*/ - unsigned int sack_timeout; - - /* HB.interval - 30 seconds */ - unsigned int hb_interval; - - /* Association.Max.Retrans - 10 attempts - * Path.Max.Retrans - 5 attempts (per destination address) - * Max.Init.Retransmits - 8 attempts - */ - int max_retrans_association; - int max_retrans_path; - int max_retrans_init; - - /* - * Policy for preforming sctp/socket accounting - * 0 - do socket level accounting, all assocs share sk_sndbuf - * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes - */ - int sndbuf_policy; - - /* - * Policy for preforming sctp/socket accounting - * 0 - do socket level accounting, all assocs share sk_rcvbuf - * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes - */ - int rcvbuf_policy; - - /* The following variables are implementation specific. */ - - /* Default initialization values to be applied to new associations. */ - __u16 max_instreams; - __u16 max_outstreams; - /* This is a list of groups of functions for each address * family that we support. */ struct list_head address_families; /* This is the hash of all endpoints. */ - int ep_hashsize; struct sctp_hashbucket *ep_hashtable; - /* This is the hash of all associations. */ - int assoc_hashsize; struct sctp_hashbucket *assoc_hashtable; - /* This is the sctp port control hash. */ - int port_hashsize; struct sctp_bind_hashbucket *port_hashtable; - /* This is the global local address list. - * We actively maintain this complete list of addresses on - * the system by catching address add/delete events. - * - * It is a list of sctp_sockaddr_entry. - */ - struct list_head local_addr_list; - - /* Lock that protects the local_addr_list writers */ - spinlock_t addr_list_lock; - - /* Flag to indicate if addip is enabled. */ - int addip_enable; - int addip_noauth_enable; + /* Sizes of above hashtables. */ + int ep_hashsize; + int assoc_hashsize; + int port_hashsize; - /* Flag to indicate if PR-SCTP is enabled. */ - int prsctp_enable; + /* Default initialization values to be applied to new associations. 
*/ + __u16 max_instreams; + __u16 max_outstreams; - /* Flag to idicate if SCTP-AUTH is enabled */ - int auth_enable; + /* Flag to indicate whether computing and verifying checksum + * is disabled. */ + bool checksum_disable; } sctp_globals; -#define sctp_rto_initial (sctp_globals.rto_initial) -#define sctp_rto_min (sctp_globals.rto_min) -#define sctp_rto_max (sctp_globals.rto_max) -#define sctp_rto_alpha (sctp_globals.rto_alpha) -#define sctp_rto_beta (sctp_globals.rto_beta) -#define sctp_max_burst (sctp_globals.max_burst) -#define sctp_valid_cookie_life (sctp_globals.valid_cookie_life) -#define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable) -#define sctp_max_retrans_association (sctp_globals.max_retrans_association) -#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy) -#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy) -#define sctp_max_retrans_path (sctp_globals.max_retrans_path) -#define sctp_max_retrans_init (sctp_globals.max_retrans_init) -#define sctp_sack_timeout (sctp_globals.sack_timeout) -#define sctp_hb_interval (sctp_globals.hb_interval) #define sctp_max_instreams (sctp_globals.max_instreams) #define sctp_max_outstreams (sctp_globals.max_outstreams) #define sctp_address_families (sctp_globals.address_families) @@ -243,15 +146,8 @@ extern struct sctp_globals { #define sctp_assoc_hashsize (sctp_globals.assoc_hashsize) #define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) #define sctp_port_hashsize (sctp_globals.port_hashsize) -#define sctp_port_rover (sctp_globals.port_rover) -#define sctp_port_alloc_lock (sctp_globals.port_alloc_lock) #define sctp_port_hashtable (sctp_globals.port_hashtable) -#define sctp_local_addr_list (sctp_globals.local_addr_list) -#define sctp_local_addr_lock (sctp_globals.addr_list_lock) -#define sctp_addip_enable (sctp_globals.addip_enable) -#define sctp_addip_noauth (sctp_globals.addip_noauth_enable) -#define sctp_prsctp_enable (sctp_globals.prsctp_enable) -#define sctp_auth_enable (sctp_globals.auth_enable) +#define sctp_checksum_disable (sctp_globals.checksum_disable) /* SCTP Socket type: UDP or TCP style. */ typedef enum { @@ -272,6 +168,7 @@ struct sctp_sock { /* Access to HMAC transform. */ struct crypto_hash *hmac; + char *sctp_hmac_alg; /* What is our base endpointer? */ struct sctp_endpoint *ep; @@ -300,6 +197,7 @@ struct sctp_sock { /* The default SACK delay timeout for new associations. */ __u32 sackdelay; + __u32 sackfreq; /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */ __u32 param_flags; @@ -321,6 +219,8 @@ struct sctp_sock { atomic_t pd_mode; /* Receive to here while partial delivery is in effect. */ struct sk_buff_head pd_lobby; + struct list_head auto_asconf_list; + int do_auto_asconf; }; static inline struct sctp_sock *sctp_sk(const struct sock *sk) @@ -333,7 +233,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp) return (struct sock *)sp; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct sctp6_sock { struct sctp_sock sctp; struct ipv6_pinfo inet6; @@ -375,7 +275,7 @@ struct sctp_cookie { __u32 peer_ttag; /* When does this cookie expire? */ - struct timeval expiration; + ktime_t expiration; /* Number of inbound/outbound streams which are set * and negotiated during the INIT process. 
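Aside (not part of the patch): the struct sctp_cookie hunk above replaces the struct timeval expiration field with a ktime_t. A minimal sketch of what that buys, using a hypothetical helper name, is a cookie-lifetime check that reduces to a single 64-bit comparison:

        /* Illustrative sketch only -- not taken from this patch.
         * cookie_is_expired() is a hypothetical helper; with the expiration
         * stored as ktime_t, expiry is one signed 64-bit compare.
         */
        #include <linux/ktime.h>

        static inline bool cookie_is_expired(ktime_t expiration)
        {
                /* ktime_compare() returns > 0 when the first argument is later */
                return ktime_compare(ktime_get_real(), expiration) > 0;
        }

Under the old representation the same test needed a two-field seconds/microseconds comparison, which is what the tv_lt() and TIMEVAL_ADD() macros removed from sctp.h earlier in this diff were for.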
@@ -402,7 +302,7 @@ struct sctp_cookie { __u32 adaptation_ind; __u8 auth_random[sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH]; - __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS + 2]; + __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS * sizeof(__u16) + 2]; __u8 auth_chunks[sizeof(sctp_paramhdr_t) + SCTP_AUTH_MAX_CHUNKS]; /* This is a shim for my peer's INIT packet, followed by @@ -421,7 +321,7 @@ struct sctp_signed_cookie { __u8 signature[SCTP_SECRET_SIZE]; __u32 __pad; /* force sctp_cookie alignment to 64 bits */ struct sctp_cookie c; -} __attribute__((packed)); +} __packed; /* This is another convenience type to allocate memory for address * params for the maximum size and pass such structures around @@ -466,7 +366,7 @@ typedef struct sctp_sender_hb_info { union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; -} __attribute__((packed)) sctp_sender_hb_info_t; +} __packed sctp_sender_hb_info_t; /* * RFC 2960 1.3.2 Sequenced Delivery within Streams @@ -490,7 +390,6 @@ struct sctp_stream { struct sctp_ssnmap { struct sctp_stream in; struct sctp_stream out; - int malloced; }; struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, @@ -523,13 +422,12 @@ static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id, */ struct sctp_af { int (*sctp_xmit) (struct sk_buff *skb, - struct sctp_transport *, - int ipfragok); + struct sctp_transport *); int (*setsockopt) (struct sock *sk, int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*getsockopt) (struct sock *sk, int level, int optname, @@ -539,24 +437,21 @@ struct sctp_af { int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*compat_getsockopt) (struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); - struct dst_entry *(*get_dst) (struct sctp_association *asoc, - union sctp_addr *daddr, - union sctp_addr *saddr); - void (*get_saddr) (struct sctp_association *asoc, - struct dst_entry *dst, - union sctp_addr *daddr, - union sctp_addr *saddr); + void (*get_dst) (struct sctp_transport *t, + union sctp_addr *saddr, + struct flowi *fl, + struct sock *sk); + void (*get_saddr) (struct sctp_sock *sk, + struct sctp_transport *t, + struct flowi *fl); void (*copy_addrlist) (struct list_head *, struct net_device *); - void (*dst_saddr) (union sctp_addr *saddr, - struct dst_entry *dst, - __be16 port); int (*cmp_addr) (const union sctp_addr *addr1, const union sctp_addr *addr2); void (*addr_copy) (union sctp_addr *dst, @@ -587,6 +482,7 @@ struct sctp_af { int (*is_ce) (const struct sk_buff *sk); void (*seq_dump_addr)(struct seq_file *seq, union sctp_addr *addr); + void (*ecn_capable)(struct sock *sk); __u16 net_header_len; int sockaddr_len; sa_family_t sa_family; @@ -620,25 +516,22 @@ struct sctp_pf { struct sctp_datamsg { /* Chunks waiting to be submitted to lower layer. */ struct list_head chunks; - /* Chunks that have been transmitted. */ - struct list_head track; /* Reference counting. */ atomic_t refcnt; /* When is this message no longer interesting to the peer? */ unsigned long expires_at; /* Did the messenge fail to send? */ int send_error; - char send_failed; - /* Control whether chunks from this message can be abandoned. */ - char can_abandon; + u8 send_failed:1, + can_abandon:1, /* can chunks from this message can be abandoned. 
*/ + can_delay; /* should this message be Nagle delayed */ }; struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, struct msghdr *, int len); -void sctp_datamsg_put(struct sctp_datamsg *); void sctp_datamsg_free(struct sctp_datamsg *); -void sctp_datamsg_track(struct sctp_chunk *); +void sctp_datamsg_put(struct sctp_datamsg *); void sctp_chunk_fail(struct sctp_chunk *, int error); int sctp_chunk_abandoned(struct sctp_chunk *); @@ -731,20 +624,23 @@ struct sctp_chunk { */ struct sk_buff *auth_chunk; - __u8 rtt_in_progress; /* Is this chunk used for RTT calculation? */ - __u8 resent; /* Has this chunk ever been retransmitted. */ - __u8 has_tsn; /* Does this chunk have a TSN yet? */ - __u8 has_ssn; /* Does this chunk have a SSN yet? */ - __u8 singleton; /* Was this the only chunk in the packet? */ - __u8 end_of_packet; /* Was this the last chunk in the packet? */ - __u8 ecn_ce_done; /* Have we processed the ECN CE bit? */ - __u8 pdiscard; /* Discard the whole packet now? */ - __u8 tsn_gap_acked; /* Is this chunk acked by a GAP ACK? */ - __s8 fast_retransmit; /* Is this chunk fast retransmitted? */ - __u8 tsn_missing_report; /* Data chunk missing counter. */ - __u8 data_accepted; /* At least 1 chunk in this packet accepted */ - __u8 auth; /* IN: was auth'ed | OUT: needs auth */ - __u8 has_asconf; /* IN: have seen an asconf before */ +#define SCTP_CAN_FRTX 0x0 +#define SCTP_NEED_FRTX 0x1 +#define SCTP_DONT_FRTX 0x2 + __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ + resent:1, /* Has this chunk ever been resent. */ + has_tsn:1, /* Does this chunk have a TSN yet? */ + has_ssn:1, /* Does this chunk have a SSN yet? */ + singleton:1, /* Only chunk in the packet? */ + end_of_packet:1, /* Last chunk in the packet? */ + ecn_ce_done:1, /* Have we processed the ECN CE bit? */ + pdiscard:1, /* Discard the whole packet now? */ + tsn_gap_acked:1, /* Is this chunk acked by a GAP ACK? */ + data_accepted:1, /* At least 1 chunk accepted */ + auth:1, /* IN: was auth'ed | OUT: needs auth */ + has_asconf:1, /* IN: have seen an asconf before */ + tsn_missing_report:2, /* Data chunk missing counter. */ + fast_retransmit:2; /* Is this chunk fast retransmitted? */ }; void sctp_chunk_hold(struct sctp_chunk *); @@ -775,6 +671,8 @@ struct sctp_sockaddr_entry { __u8 valid; }; +#define SCTP_ADDRESS_TICK_DELAY 500 + typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); /* This structure holds lists of chunks as we are assembling for @@ -803,22 +701,11 @@ struct sctp_packet { /* pointer to the auth chunk for this packet */ struct sctp_chunk *auth; - /* This packet contains a COOKIE-ECHO chunk. */ - __u8 has_cookie_echo; - - /* This packet contains a SACK chunk. */ - __u8 has_sack; - - /* This packet contains an AUTH chunk */ - __u8 has_auth; - - /* This packet contains at least 1 DATA chunk */ - __u8 has_data; - - /* SCTP cannot fragment this packet. So let ip fragment it. */ - __u8 ipfragok; - - __u8 malloced; + u8 has_cookie_echo:1, /* This packet contains a COOKIE-ECHO chunk. */ + has_sack:1, /* This packet contains a SACK chunk. 
*/ + has_auth:1, /* This packet contains an AUTH chunk */ + has_data:1, /* This packet contains at least 1 DATA chunk */ + ipfragok:1; /* So let ip fragment this packet */ }; struct sctp_packet *sctp_packet_init(struct sctp_packet *, @@ -826,7 +713,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *, __u16 sport, __u16 dport); struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int); sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *, - struct sctp_chunk *); + struct sctp_chunk *, int); sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *, struct sctp_chunk *); int sctp_packet_transmit(struct sctp_packet *); @@ -834,7 +721,7 @@ void sctp_packet_free(struct sctp_packet *); static inline int sctp_packet_empty(struct sctp_packet *packet) { - return (packet->size == packet->overhead); + return packet->size == packet->overhead; } /* This represents a remote transport address. @@ -863,7 +750,32 @@ struct sctp_transport { /* Reference counting. */ atomic_t refcnt; - int dead; + __u32 dead:1, + /* RTO-Pending : A flag used to track if one of the DATA + * chunks sent to this address is currently being + * used to compute a RTT. If this flag is 0, + * the next DATA chunk sent to this destination + * should be used to compute a RTT and this flag + * should be set. Every time the RTT + * calculation completes (i.e. the DATA chunk + * is SACK'd) clear this flag. + */ + rto_pending:1, + + /* + * hb_sent : a flag that signals that we have a pending + * heartbeat. + */ + hb_sent:1, + + /* Is the Path MTU update pending on this tranport */ + pmtu_pending:1; + + /* Has this transport moved the ctsn since we last sacked */ + __u32 sack_generation; + u32 dst_cookie; + + struct flowi fl; /* This is the peer's IP address and port. */ union sctp_addr ipaddr; @@ -884,7 +796,6 @@ struct sctp_transport { */ /* RTO : The current retransmission timeout value. */ unsigned long rto; - unsigned long last_rto; __u32 rtt; /* This is the most recent RTT. */ @@ -894,17 +805,6 @@ struct sctp_transport { /* SRTT : The current smoothed round trip time. */ __u32 srtt; - /* RTO-Pending : A flag used to track if one of the DATA - * chunks sent to this address is currently being - * used to compute a RTT. If this flag is 0, - * the next DATA chunk sent to this destination - * should be used to compute a RTT and this flag - * should be set. Every time the RTT - * calculation completes (i.e. the DATA chunk - * is SACK'd) clear this flag. - */ - int rto_pending; - /* * These are the congestion stats. */ @@ -922,17 +822,13 @@ struct sctp_transport { /* Data that has been sent, but not acknowledged. */ __u32 flight_size; + __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ + /* Destination */ struct dst_entry *dst; /* Source address. */ union sctp_addr saddr; - /* When was the last time(in jiffies) that a data packet was sent on - * this transport? This is used to adjust the cwnd when the transport - * becomes inactive. - */ - unsigned long last_time_used; - /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to * the destination address every heartbeat interval. */ @@ -940,11 +836,12 @@ struct sctp_transport { /* SACK delay timeout */ unsigned long sackdelay; + __u32 sackfreq; - /* When was the last time (in jiffies) that we heard from this - * transport? We use this to pick new active and retran paths. + /* When was the last time that we heard from this transport? We use + * this to pick new active and retran paths. 
*/ - unsigned long last_time_heard; + ktime_t last_time_heard; /* Last time(in jiffies) when cwnd is reduced due to the congestion * indication based on ECNE chunk. @@ -953,13 +850,15 @@ struct sctp_transport { /* This is the max_retrans value for the transport and will * be initialized from the assocs value. This can be changed - * using SCTP_SET_PEER_ADDR_PARAMS socket option. + * using the SCTP_SET_PEER_ADDR_PARAMS socket option. */ __u16 pathmaxrxt; - /* is the Path MTU update pending on this tranport */ - __u8 pmtu_pending; - + /* This is the partially failed retrans value for the transport + * and will be initialized from the assocs value. This can be changed + * using the SCTP_PEER_ADDR_THLDS socket option + */ + int pf_retrans; /* PMTU : The current known path MTU. */ __u32 pathmtu; @@ -970,7 +869,7 @@ struct sctp_transport { int init_sent_count; /* state : The current state of this destination, - * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKOWN. + * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKNOWN. */ int state; @@ -990,6 +889,9 @@ struct sctp_transport { /* Heartbeat timer is per destination. */ struct timer_list hb_timer; + /* Timer to handle ICMP proto unreachable envets */ + struct timer_list proto_unreach_timer; + /* Since we're using per-destination retransmission timers * (see above), we're also using per-destination "transmitted" * queues. This probably ought to be a private struct @@ -1003,8 +905,6 @@ struct sctp_transport { /* This is the list of transports that have chunks to send. */ struct list_head send_ready; - int malloced; /* Is this structure kfree()able? */ - /* State information saved for SFR_CACC algorithm. The key * idea in SFR_CACC is to maintain state at the sender on a * per-destination basis when a changeover happens. @@ -1036,15 +936,17 @@ struct sctp_transport { /* 64-bit random number sent with heartbeat. */ __u64 hb_nonce; + + struct rcu_head rcu; }; -struct sctp_transport *sctp_transport_new(const union sctp_addr *, +struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *, gfp_t); void sctp_transport_set_owner(struct sctp_transport *, struct sctp_association *); void sctp_transport_route(struct sctp_transport *, union sctp_addr *, struct sctp_sock *); -void sctp_transport_pmtu(struct sctp_transport *); +void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); void sctp_transport_free(struct sctp_transport *); void sctp_transport_reset_timers(struct sctp_transport *); void sctp_transport_hold(struct sctp_transport *); @@ -1052,9 +954,12 @@ void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32); void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t); +void sctp_transport_burst_limited(struct sctp_transport *); +void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *); -void sctp_transport_update_pmtu(struct sctp_transport *, u32); +void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); +void sctp_transport_immediate_rtx(struct sctp_transport *); /* This is the structure we use to queue packets as they come into @@ -1074,8 +979,6 @@ struct sctp_inq { * messages. */ struct work_struct immediate; - - int malloced; /* Is this structure kfree()able? 
*/ }; void sctp_inq_init(struct sctp_inq *); @@ -1110,10 +1013,10 @@ struct sctp_outq { /* Data pending that has never been transmitted. */ struct list_head out_chunk_list; - unsigned out_qlen; /* Total length of queued data chunks. */ + unsigned int out_qlen; /* Total length of queued data chunks. */ /* Error of send failed, may used in SCTP_SEND_FAILED event. */ - unsigned error; + unsigned int error; /* These are control chunks we want to send. */ struct list_head control_chunk_list; @@ -1136,22 +1039,18 @@ struct sctp_outq { /* How many unackd bytes do we have in-flight? */ __u32 outstanding_bytes; + /* Are we doing fast-rtx on this queue */ + char fast_rtx; + /* Corked? */ char cork; - - /* Is this structure empty? */ - char empty; - - /* Are we kfree()able? */ - char malloced; }; void sctp_outq_init(struct sctp_association *, struct sctp_outq *); void sctp_outq_teardown(struct sctp_outq *); void sctp_outq_free(struct sctp_outq*); int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk); -int sctp_outq_flush(struct sctp_outq *, int); -int sctp_outq_sack(struct sctp_outq *, struct sctp_sackhdr *); +int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *); int sctp_outq_is_empty(const struct sctp_outq *); void sctp_outq_restart(struct sctp_outq *); @@ -1182,13 +1081,11 @@ struct sctp_bind_addr { * peer(s) in INIT and INIT ACK chunks. */ struct list_head address_list; - - int malloced; /* Are we kfree()able? */ }; void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port); void sctp_bind_addr_free(struct sctp_bind_addr *); -int sctp_bind_addr_copy(struct sctp_bind_addr *dest, +int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, sctp_scope_t scope, gfp_t gfp, int flags); @@ -1200,6 +1097,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, struct sctp_sock *); +int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *, + struct sctp_sock *, struct sctp_sock *); int sctp_bind_addr_state(const struct sctp_bind_addr *bp, const union sctp_addr *addr); union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, @@ -1213,9 +1112,10 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, __u16 port, gfp_t gfp); sctp_scope_t sctp_scope(const union sctp_addr *); -int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); -int sctp_is_any(const union sctp_addr *addr); +int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); +int sctp_is_any(struct sock *sk, const union sctp_addr *addr); int sctp_addr_is_valid(const union sctp_addr *addr); +int sctp_is_ep_boundall(struct sock *sk); /* What type of endpoint? */ @@ -1251,11 +1151,9 @@ struct sctp_ep_common { /* Some fields to help us manage this object. * refcnt - Reference count access to this object. * dead - Do not attempt to use this object. - * malloced - Do we need to kfree this object? */ atomic_t refcnt; - char dead; - char malloced; + bool dead; /* What socket does this endpoint belong to? */ struct sock *sk; @@ -1313,10 +1211,7 @@ struct sctp_endpoint { * Discussion in [RFC1750] can be helpful in * selection of the key. */ - __u8 secret_key[SCTP_HOW_MANY_SECRETS][SCTP_SECRET_SIZE]; - int current_key; - int last_key; - int key_changed_at; + __u8 secret_key[SCTP_SECRET_SIZE]; /* digest: This is a digest of the sctp cookie. 
This field is * only used on the receive path when we try to validate @@ -1346,6 +1241,7 @@ struct sctp_endpoint { /* SCTP-AUTH: endpoint shared keys */ struct list_head endpoint_shared_keys; __u16 active_key_id; + __u8 auth_enable; }; /* Recover the outter endpoint structure. */ @@ -1370,14 +1266,15 @@ struct sctp_association *sctp_endpoint_lookup_assoc( int sctp_endpoint_is_peeled_off(struct sctp_endpoint *, const union sctp_addr *); struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *, - const union sctp_addr *); -int sctp_has_association(const union sctp_addr *laddr, + struct net *, const union sctp_addr *); +int sctp_has_association(struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr); -int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t, - sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, - struct sctp_chunk **err_chunk); -int sctp_process_init(struct sctp_association *, sctp_cid_t cid, +int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + sctp_cid_t, sctp_init_chunk_t *peer_init, + struct sctp_chunk *chunk, struct sctp_chunk **err_chunk); +int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk, const union sctp_addr *peer, sctp_init_chunk_t *init, gfp_t gfp); __u32 sctp_generate_tag(const struct sctp_endpoint *); @@ -1391,6 +1288,40 @@ struct sctp_inithdr_host { __u32 initial_tsn; }; +/* SCTP_GET_ASSOC_STATS counters */ +struct sctp_priv_assoc_stats { + /* Maximum observed rto in the association during subsequent + * observations. Value is set to 0 if no RTO measurement took place + * The transport where the max_rto was observed is returned in + * obs_rto_ipaddr + */ + struct sockaddr_storage obs_rto_ipaddr; + __u64 max_obs_rto; + /* Total In and Out SACKs received and sent */ + __u64 isacks; + __u64 osacks; + /* Total In and Out packets received and sent */ + __u64 opackets; + __u64 ipackets; + /* Total retransmitted chunks */ + __u64 rtxchunks; + /* TSN received > next expected */ + __u64 outofseqtsns; + /* Duplicate Chunks received */ + __u64 idupchunks; + /* Gap Ack Blocks received */ + __u64 gapcnt; + /* Unordered data chunks sent and received */ + __u64 ouodchunks; + __u64 iuodchunks; + /* Ordered data chunks sent and received */ + __u64 oodchunks; + __u64 iodchunks; + /* Control chunks sent and received */ + __u64 octrlchunks; + __u64 ictrlchunks; +}; + /* RFC2960 * * 12. Recommended Transmission Control Block (TCB) Parameters @@ -1427,12 +1358,6 @@ struct sctp_association { /* This is all information about our peer. */ struct { - /* rwnd - * - * Peer Rwnd : Current calculated value of the peer's rwnd. - */ - __u32 rwnd; - /* transport_addr_list * * Peer : A list of SCTP transport addresses that the @@ -1450,6 +1375,12 @@ struct sctp_association { */ struct list_head transport_addr_list; + /* rwnd + * + * Peer Rwnd : Current calculated value of the peer's rwnd. + */ + __u32 rwnd; + /* transport_count * * Peer : A count of the number of peer addresses @@ -1531,7 +1462,20 @@ struct sctp_association { * in tsn_map--we get it by calling sctp_tsnmap_get_ctsn(). */ struct sctp_tsnmap tsn_map; - __u8 _map[sctp_tsnmap_storage_size(SCTP_TSN_MAP_SIZE)]; + + /* This mask is used to disable sending the ASCONF chunk + * with specified parameter to peer. + */ + __be16 addip_disabled_mask; + + /* These are capabilities which our peer advertised. */ + __u8 ecn_capable:1, /* Can peer do ECN? */ + ipv4_address:1, /* Peer understands IPv4 addresses? 
*/ + ipv6_address:1, /* Peer understands IPv6 addresses? */ + hostname_address:1, /* Peer understands DNS addresses? */ + asconf_capable:1, /* Does peer support ADDIP? */ + prsctp_capable:1, /* Can peer do PR-SCTP? */ + auth_capable:1; /* Is peer doing SCTP-AUTH? */ /* Ack State : This flag indicates if the next received * : packet is to be responded to with a @@ -1544,26 +1488,14 @@ struct sctp_association { * : SACK's are not delayed (see Section 6). */ __u8 sack_needed; /* Do we need to sack the peer? */ - - /* These are capabilities which our peer advertised. */ - __u8 ecn_capable; /* Can peer do ECN? */ - __u8 ipv4_address; /* Peer understands IPv4 addresses? */ - __u8 ipv6_address; /* Peer understands IPv6 addresses? */ - __u8 hostname_address;/* Peer understands DNS addresses? */ - __u8 asconf_capable; /* Does peer support ADDIP? */ - __u8 prsctp_capable; /* Can peer do PR-SCTP? */ - __u8 auth_capable; /* Is peer doing SCTP-AUTH? */ + __u32 sack_cnt; + __u32 sack_generation; __u32 adaptation_ind; /* Adaptation Code point. */ - /* This mask is used to disable sending the ASCONF chunk - * with specified parameter to peer. - */ - __be16 addip_disabled_mask; - struct sctp_inithdr_host i; - int cookie_len; void *cookie; + int cookie_len; /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. * C1) ... "Peer-Serial-Number'. This value MUST be initialized to the @@ -1595,14 +1527,14 @@ struct sctp_association { */ sctp_state_t state; - /* The cookie life I award for any cookie. */ - struct timeval cookie_life; - /* Overall : The overall association error count. * Error Count : [Clear this any time I get something.] */ int overall_error_count; + /* The cookie life I award for any cookie. */ + ktime_t cookie_life; + /* These are the association's initial, max, and min RTO values. * These values will be initialized by system defaults, but can * be modified via the SCTP_RTOINFO socket option. @@ -1620,6 +1552,12 @@ struct sctp_association { */ int max_retrans; + /* This is the partially failed retrans value for the transport + * and will be initialized from the assocs value. This can be + * changed using the SCTP_PEER_ADDR_THLDS socket option + */ + int pf_retrans; + /* Maximum number of times the endpoint will retransmit INIT */ __u16 max_init_attempts; @@ -1651,10 +1589,10 @@ struct sctp_association { /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */ __u32 param_flags; + __u32 sackfreq; /* SACK delay timeout */ unsigned long sackdelay; - unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES]; struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES]; @@ -1664,6 +1602,9 @@ struct sctp_association { /* Transport to which INIT chunk was last sent. */ struct sctp_transport *init_last_sent_to; + /* How many times have we resent a SHUTDOWN */ + int shutdown_retries; + /* Next TSN : The next TSN number to be assigned to a new * : DATA chunk. This is sent in the INIT or INIT * : ACK chunk to the peer and incremented each @@ -1690,11 +1631,22 @@ struct sctp_association { /* Highest TSN that is acknowledged by incoming SACKs. */ __u32 highest_sacked; + /* TSN marking the fast recovery exit point */ + __u32 fast_recovery_exit; + + /* Flag to track the current fast recovery state */ + __u8 fast_recovery; + /* The number of unacknowledged data chunks. Reported through * the SCTP_STATUS sockopt. 
*/ __u16 unack_data; + /* The total number of data chunks that we've had to retransmit + * as the result of a T3 timer expiration + */ + __u32 rtx_data_chunks; + /* This is the association's receive buffer space. This value is used * to set a_rwnd field in an INIT or a SACK chunk. */ @@ -1708,6 +1660,12 @@ struct sctp_association { */ __u32 rwnd_over; + /* Keeps treack of rwnd pressure. This happens when we have + * a window, but not recevie buffer (i.e small packets). This one + * is releases slowly (1 PMTU at a time ). + */ + __u32 rwnd_press; + /* This is the sndbuf size in use for the association. * This corresponds to the sndbuf size for the association, * as specified in the sk->sndbuf. @@ -1726,6 +1684,7 @@ struct sctp_association { /* The message size at which SCTP fragmentation will occur. */ __u32 frag_point; + __u32 user_frag; /* Counter used to count INIT errors. */ int init_err_counter; @@ -1763,12 +1722,6 @@ struct sctp_association { /* How many duplicated TSNs have we seen? */ int numduptsns; - /* Number of seconds of idle time before an association is closed. - * In the association context, this is really used as a boolean - * since the real timeout is stored in the timeouts array - */ - __u32 autoclose; - /* These are to support * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses * and Enforcement of Flow and Message Limits" @@ -1856,6 +1809,9 @@ struct sctp_association { * after reaching 4294967295. */ __u32 addip_serial; + int src_out_of_asoc_ok; + union sctp_addr *asconf_addr_del_pending; + struct sctp_transport *new_transport; /* SCTP AUTH: list of the endpoint shared keys. These * keys are provided out of band by the user applicaton @@ -1875,11 +1831,10 @@ struct sctp_association { __u16 active_key_id; - /* Need to send an ECNE Chunk? */ - char need_ecne; + __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ + temp:1; /* Is it a temporary association? */ - /* Is it a temporary association? 
*/ - char temp; + struct sctp_priv_assoc_stats stats; }; @@ -1909,10 +1864,8 @@ void sctp_association_free(struct sctp_association *); void sctp_association_put(struct sctp_association *); void sctp_association_hold(struct sctp_association *); -struct sctp_transport *sctp_assoc_choose_init_transport( - struct sctp_association *); -struct sctp_transport *sctp_assoc_choose_shutdown_transport( - struct sctp_association *); +struct sctp_transport *sctp_assoc_choose_alter_transport( + struct sctp_association *, struct sctp_transport *); void sctp_assoc_update_retran_path(struct sctp_association *); struct sctp_transport *sctp_assoc_lookup_paddr(const struct sctp_association *, const union sctp_addr *); @@ -1931,6 +1884,7 @@ void sctp_assoc_control_transport(struct sctp_association *, sctp_transport_cmd_t, sctp_sn_error_t); struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32); struct sctp_transport *sctp_assoc_is_match(struct sctp_association *, + struct net *, const union sctp_addr *, const union sctp_addr *); void sctp_assoc_migrate(struct sctp_association *, struct sock *); @@ -1939,15 +1893,15 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); -void sctp_assoc_sync_pmtu(struct sctp_association *); -void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned); -void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned); +void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); +void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); +void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); void sctp_assoc_del_nonprimary_peers(struct sctp_association *, struct sctp_transport *); int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, - gfp_t); + sctp_scope_t, gfp_t); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, gfp_t gfp); @@ -1956,7 +1910,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc); struct sctp_chunk *sctp_assoc_lookup_asconf_ack( const struct sctp_association *asoc, __be32 serial); - +void sctp_asconf_queue_teardown(struct sctp_association *asoc); int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2); diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h index 099211bf998..31b8dbaad45 100644 --- a/include/net/sctp/tsnmap.h +++ b/include/net/sctp/tsnmap.h @@ -22,25 +22,18 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Jon Grimm <jgrimm@us.ibm.com> * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Sridhar Samudrala <sri@us.ibm.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #include <net/sctp/constants.h> @@ -60,18 +53,7 @@ struct sctp_tsnmap { * It points at one of the two buffers with which we will * ping-pong between. */ - __u8 *tsn_map; - - /* This marks the tsn which overflows the tsn_map, when the - * cumulative ack point reaches this point we know we can switch - * maps (tsn_map and overflow_map swap). - */ - __u32 overflow_tsn; - - /* This is the overflow array for tsn_map. - * It points at one of the other ping-pong buffers. - */ - __u8 *overflow_map; + unsigned long *tsn_map; /* This is the TSN at tsn_map[0]. */ __u32 base_tsn; @@ -89,15 +71,15 @@ struct sctp_tsnmap { */ __u32 cumulative_tsn_ack_point; + /* This is the highest TSN we've marked. */ + __u32 max_tsn_seen; + /* This is the minimum number of TSNs we can track. This corresponds * to the size of tsn_map. Note: the overflow_map allows us to * potentially track more than this quantity. */ __u16 len; - /* This is the highest TSN we've marked. */ - __u32 max_tsn_seen; - /* Data chunks pending receipt. used by SCTP_STATUS sockopt */ __u16 pending_data; @@ -105,29 +87,19 @@ struct sctp_tsnmap { * every SACK. Store up to SCTP_MAX_DUP_TSNS worth of * information. */ - __be32 dup_tsns[SCTP_MAX_DUP_TSNS]; __u16 num_dup_tsns; - - /* Record gap ack block information here. */ - struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; - - int malloced; - - __u8 raw_map[0]; + __be32 dup_tsns[SCTP_MAX_DUP_TSNS]; }; struct sctp_tsnmap_iter { __u32 start; }; -/* This macro assists in creation of external storage for variable length - * internal buffers. We double allocate so the overflow map works. - */ -#define sctp_tsnmap_storage_size(count) (sizeof(__u8) * (count) * 2) - /* Initialize a block of memory as a tsnmap. */ struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len, - __u32 initial_tsn); + __u32 initial_tsn, gfp_t gfp); + +void sctp_tsnmap_free(struct sctp_tsnmap *map); /* Test the tracking state of this TSN. * Returns: @@ -138,7 +110,8 @@ struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len, int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn); /* Mark this TSN as seen. */ -void sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn); +int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn, + struct sctp_transport *trans); /* Mark this TSN and all lower as seen. */ void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn); @@ -169,24 +142,16 @@ static inline __be32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map) } /* How many gap ack blocks do we have recorded? */ -__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map); +__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map, + struct sctp_gap_ack_block *gabs); /* Refresh the count on pending data. */ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map); -/* Return pointer to gap ack blocks as needed by SACK. */ -static inline struct sctp_gap_ack_block *sctp_tsnmap_get_gabs(struct sctp_tsnmap *map) -{ - return map->gabs; -} - /* Is there a gap in the TSN map? */ static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map) { - int has_gap; - - has_gap = (map->cumulative_tsn_ack_point != map->max_tsn_seen); - return has_gap; + return map->cumulative_tsn_ack_point != map->max_tsn_seen; } /* Mark a duplicate TSN. Note: limit the storage of duplicate TSN diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 9bcfc12275e..daacb32b55b 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -25,25 +25,18 @@ * See the GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Jon Grimm <jgrimm@us.ibm.com> * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Sridhar Samudrala <sri@us.ibm.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_ulpevent_h__ @@ -67,7 +60,7 @@ struct sctp_ulpevent { }; /* Retrieve the skb this event sits inside of. */ -static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev) +static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev) { return container_of((void *)ev, struct sk_buff, cb); } @@ -80,7 +73,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb) void sctp_ulpevent_free(struct sctp_ulpevent *); int sctp_ulpevent_is_notification(const struct sctp_ulpevent *); -void sctp_queue_purge_ulpevents(struct sk_buff_head *list); +unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list); struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( const struct sctp_association *asoc, @@ -132,6 +125,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey( const struct sctp_association *asoc, __u16 key_id, __u32 indication, gfp_t gfp); +struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( + const struct sctp_association *asoc, gfp_t gfp); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event); diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index 2e5ee0d8458..e0dce07b879 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -24,24 +24,17 @@ * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email addresses: - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp + * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Jon Grimm <jgrimm@us.ibm.com> * La Monte H.P. Yarroll <piggy@acm.org> * Sridhar Samudrala <sri@us.ibm.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_ulpqueue_h__ @@ -49,7 +42,6 @@ /* A structure to carry information to the ULP (e.g. 
Sockets API) */ struct sctp_ulpq { - char malloced; char pd_mode; struct sctp_association *asoc; struct sk_buff_head reasm; @@ -72,7 +64,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev); void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); /* Perform partial delivery. */ -void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); +void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t); /* Abort the partial delivery. */ void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t); diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h deleted file mode 100644 index 9619b9d35c9..00000000000 --- a/include/net/sctp/user.h +++ /dev/null @@ -1,743 +0,0 @@ -/* SCTP kernel implementation - * (C) Copyright IBM Corp. 2001, 2004 - * Copyright (c) 1999-2000 Cisco, Inc. - * Copyright (c) 1999-2001 Motorola, Inc. - * Copyright (c) 2002 Intel Corp. - * - * This file is part of the SCTP kernel implementation - * - * This header represents the structures and constants needed to support - * the SCTP Extension to the Sockets API. - * - * This SCTP implementation is free software; - * you can redistribute it and/or modify it under the terms of - * the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This SCTP implementation is distributed in the hope that it - * will be useful, but WITHOUT ANY WARRANTY; without even the implied - * ************************ - * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - * - * Please send any bug reports or fixes you make to the - * email address(es): - * lksctp developers <lksctp-developers@lists.sourceforge.net> - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * - * Written or modified by: - * La Monte H.P. Yarroll <piggy@acm.org> - * R. Stewart <randall@sctp.chicago.il.us> - * K. Morneau <kmorneau@cisco.com> - * Q. Xie <qxie1@email.mot.com> - * Karl Knutson <karl@athena.chicago.il.us> - * Jon Grimm <jgrimm@us.ibm.com> - * Daisy Chang <daisyc@us.ibm.com> - * Ryan Layer <rmlayer@us.ibm.com> - * Ardelle Fan <ardelle.fan@intel.com> - * Sridhar Samudrala <sri@us.ibm.com> - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. - */ - -#ifndef __net_sctp_user_h__ -#define __net_sctp_user_h__ - -#include <linux/types.h> -#include <linux/socket.h> - -typedef __s32 sctp_assoc_t; - -/* The following symbols come from the Sockets API Extensions for - * SCTP <draft-ietf-tsvwg-sctpsocket-07.txt>. - */ -enum sctp_optname { - SCTP_RTOINFO, -#define SCTP_RTOINFO SCTP_RTOINFO - SCTP_ASSOCINFO, -#define SCTP_ASSOCINFO SCTP_ASSOCINFO - SCTP_INITMSG, -#define SCTP_INITMSG SCTP_INITMSG - SCTP_NODELAY, /* Get/set nodelay option. 
*/ -#define SCTP_NODELAY SCTP_NODELAY - SCTP_AUTOCLOSE, -#define SCTP_AUTOCLOSE SCTP_AUTOCLOSE - SCTP_SET_PEER_PRIMARY_ADDR, -#define SCTP_SET_PEER_PRIMARY_ADDR SCTP_SET_PEER_PRIMARY_ADDR - SCTP_PRIMARY_ADDR, -#define SCTP_PRIMARY_ADDR SCTP_PRIMARY_ADDR - SCTP_ADAPTATION_LAYER, -#define SCTP_ADAPTATION_LAYER SCTP_ADAPTATION_LAYER - SCTP_DISABLE_FRAGMENTS, -#define SCTP_DISABLE_FRAGMENTS SCTP_DISABLE_FRAGMENTS - SCTP_PEER_ADDR_PARAMS, -#define SCTP_PEER_ADDR_PARAMS SCTP_PEER_ADDR_PARAMS - SCTP_DEFAULT_SEND_PARAM, -#define SCTP_DEFAULT_SEND_PARAM SCTP_DEFAULT_SEND_PARAM - SCTP_EVENTS, -#define SCTP_EVENTS SCTP_EVENTS - SCTP_I_WANT_MAPPED_V4_ADDR, /* Turn on/off mapped v4 addresses */ -#define SCTP_I_WANT_MAPPED_V4_ADDR SCTP_I_WANT_MAPPED_V4_ADDR - SCTP_MAXSEG, /* Get/set maximum fragment. */ -#define SCTP_MAXSEG SCTP_MAXSEG - SCTP_STATUS, -#define SCTP_STATUS SCTP_STATUS - SCTP_GET_PEER_ADDR_INFO, -#define SCTP_GET_PEER_ADDR_INFO SCTP_GET_PEER_ADDR_INFO - SCTP_DELAYED_ACK_TIME, -#define SCTP_DELAYED_ACK_TIME SCTP_DELAYED_ACK_TIME - SCTP_CONTEXT, /* Receive Context */ -#define SCTP_CONTEXT SCTP_CONTEXT - SCTP_FRAGMENT_INTERLEAVE, -#define SCTP_FRAGMENT_INTERLEAVE SCTP_FRAGMENT_INTERLEAVE - SCTP_PARTIAL_DELIVERY_POINT, /* Set/Get partial delivery point */ -#define SCTP_PARTIAL_DELIVERY_POINT SCTP_PARTIAL_DELIVERY_POINT - SCTP_MAX_BURST, /* Set/Get max burst */ -#define SCTP_MAX_BURST SCTP_MAX_BURST - SCTP_AUTH_CHUNK, /* Set only: add a chunk type to authenticat */ -#define SCTP_AUTH_CHUNK SCTP_AUTH_CHUNK - SCTP_HMAC_IDENT, -#define SCTP_HMAC_IDENT SCTP_HMAC_IDENT - SCTP_AUTH_KEY, -#define SCTP_AUTH_KEY SCTP_AUTH_KEY - SCTP_AUTH_ACTIVE_KEY, -#define SCTP_AUTH_ACTIVE_KEY SCTP_AUTH_ACTIVE_KEY - SCTP_AUTH_DELETE_KEY, -#define SCTP_AUTH_DELETE_KEY SCTP_AUTH_DELETE_KEY - SCTP_PEER_AUTH_CHUNKS, /* Read only */ -#define SCTP_PEER_AUTH_CHUNKS SCTP_PEER_AUTH_CHUNKS - SCTP_LOCAL_AUTH_CHUNKS, /* Read only */ -#define SCTP_LOCAL_AUTH_CHUNKS SCTP_LOCAL_AUTH_CHUNKS - - - /* Internal Socket Options. Some of the sctp library functions are - * implemented using these socket options. - */ - SCTP_SOCKOPT_BINDX_ADD = 100,/* BINDX requests for adding addresses. */ -#define SCTP_SOCKOPT_BINDX_ADD SCTP_SOCKOPT_BINDX_ADD - SCTP_SOCKOPT_BINDX_REM, /* BINDX requests for removing addresses. */ -#define SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_BINDX_REM - SCTP_SOCKOPT_PEELOFF, /* peel off association. */ -#define SCTP_SOCKOPT_PEELOFF SCTP_SOCKOPT_PEELOFF - SCTP_GET_PEER_ADDRS_NUM_OLD, /* Get number of peer addresss. */ -#define SCTP_GET_PEER_ADDRS_NUM_OLD SCTP_GET_PEER_ADDRS_NUM_OLD - SCTP_GET_PEER_ADDRS_OLD, /* Get all peer addresss. */ -#define SCTP_GET_PEER_ADDRS_OLD SCTP_GET_PEER_ADDRS_OLD - SCTP_GET_LOCAL_ADDRS_NUM_OLD, /* Get number of local addresss. */ -#define SCTP_GET_LOCAL_ADDRS_NUM_OLD SCTP_GET_LOCAL_ADDRS_NUM_OLD - SCTP_GET_LOCAL_ADDRS_OLD, /* Get all local addresss. */ -#define SCTP_GET_LOCAL_ADDRS_OLD SCTP_GET_LOCAL_ADDRS_OLD - SCTP_SOCKOPT_CONNECTX, /* CONNECTX requests. */ -#define SCTP_SOCKOPT_CONNECTX SCTP_SOCKOPT_CONNECTX - SCTP_GET_PEER_ADDRS, /* Get all peer addresss. */ -#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS - SCTP_GET_LOCAL_ADDRS, /* Get all local addresss. */ -#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS -}; - -/* - * 5.2.1 SCTP Initiation Structure (SCTP_INIT) - * - * This cmsghdr structure provides information for initializing new - * SCTP associations with sendmsg(). The SCTP_INITMSG socket option - * uses this same data structure. This structure is not used for - * recvmsg(). 
- * - * cmsg_level cmsg_type cmsg_data[] - * ------------ ------------ ---------------------- - * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg - * - */ -struct sctp_initmsg { - __u16 sinit_num_ostreams; - __u16 sinit_max_instreams; - __u16 sinit_max_attempts; - __u16 sinit_max_init_timeo; -}; - -/* - * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) - * - * This cmsghdr structure specifies SCTP options for sendmsg() and - * describes SCTP header information about a received message through - * recvmsg(). - * - * cmsg_level cmsg_type cmsg_data[] - * ------------ ------------ ---------------------- - * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo - * - */ -struct sctp_sndrcvinfo { - __u16 sinfo_stream; - __u16 sinfo_ssn; - __u16 sinfo_flags; - __u32 sinfo_ppid; - __u32 sinfo_context; - __u32 sinfo_timetolive; - __u32 sinfo_tsn; - __u32 sinfo_cumtsn; - sctp_assoc_t sinfo_assoc_id; -}; - -/* - * sinfo_flags: 16 bits (unsigned integer) - * - * This field may contain any of the following flags and is composed of - * a bitwise OR of these values. - */ - -enum sctp_sinfo_flags { - SCTP_UNORDERED = 1, /* Send/receive message unordered. */ - SCTP_ADDR_OVER = 2, /* Override the primary destination. */ - SCTP_ABORT=4, /* Send an ABORT message to the peer. */ - SCTP_EOF=MSG_FIN, /* Initiate graceful shutdown process. */ -}; - - -typedef union { - __u8 raw; - struct sctp_initmsg init; - struct sctp_sndrcvinfo sndrcv; -} sctp_cmsg_data_t; - -/* These are cmsg_types. */ -typedef enum sctp_cmsg_type { - SCTP_INIT, /* 5.2.1 SCTP Initiation Structure */ - SCTP_SNDRCV, /* 5.2.2 SCTP Header Information Structure */ -} sctp_cmsg_t; - - -/* - * 5.3.1.1 SCTP_ASSOC_CHANGE - * - * Communication notifications inform the ULP that an SCTP association - * has either begun or ended. The identifier for a new association is - * provided by this notificaion. The notification information has the - * following format: - * - */ -struct sctp_assoc_change { - __u16 sac_type; - __u16 sac_flags; - __u32 sac_length; - __u16 sac_state; - __u16 sac_error; - __u16 sac_outbound_streams; - __u16 sac_inbound_streams; - sctp_assoc_t sac_assoc_id; - __u8 sac_info[0]; -}; - -/* - * sac_state: 32 bits (signed integer) - * - * This field holds one of a number of values that communicate the - * event that happened to the association. They include: - * - * Note: The following state names deviate from the API draft as - * the names clash too easily with other kernel symbols. - */ -enum sctp_sac_state { - SCTP_COMM_UP, - SCTP_COMM_LOST, - SCTP_RESTART, - SCTP_SHUTDOWN_COMP, - SCTP_CANT_STR_ASSOC, -}; - -/* - * 5.3.1.2 SCTP_PEER_ADDR_CHANGE - * - * When a destination address on a multi-homed peer encounters a change - * an interface details event is sent. The information has the - * following structure: - */ -struct sctp_paddr_change { - __u16 spc_type; - __u16 spc_flags; - __u32 spc_length; - struct sockaddr_storage spc_aaddr; - int spc_state; - int spc_error; - sctp_assoc_t spc_assoc_id; -} __attribute__((packed, aligned(4))); - -/* - * spc_state: 32 bits (signed integer) - * - * This field holds one of a number of values that communicate the - * event that happened to the address. They include: - */ -enum sctp_spc_state { - SCTP_ADDR_AVAILABLE, - SCTP_ADDR_UNREACHABLE, - SCTP_ADDR_REMOVED, - SCTP_ADDR_ADDED, - SCTP_ADDR_MADE_PRIM, - SCTP_ADDR_CONFIRMED, -}; - - -/* - * 5.3.1.3 SCTP_REMOTE_ERROR - * - * A remote peer may send an Operational Error message to its peer. 
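For illustration, a minimal sketch of how an application typically fills the SCTP_SNDRCV ancillary data described above, sending one unordered message on stream 1. It assumes the lksctp-tools userspace headers and omits error handling.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static ssize_t send_unordered(int fd, const void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct sctp_sndrcvinfo *sinfo;
	struct cmsghdr *cmsg;
	struct msghdr msg;
	struct iovec iov;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));

	iov.iov_base = (void *)buf;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* One SCTP_SNDRCV cmsg carries the per-message send parameters. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*sinfo));

	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	sinfo->sinfo_stream = 1;		/* stream number */
	sinfo->sinfo_flags = SCTP_UNORDERED;	/* unordered delivery */

	return sendmsg(fd, &msg, 0);
}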
- * This message indicates a variety of error conditions on an - * association. The entire error TLV as it appears on the wire is - * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP - * specification [SCTP] and any extensions for a list of possible - * error formats. SCTP error TLVs have the format: - */ -struct sctp_remote_error { - __u16 sre_type; - __u16 sre_flags; - __u32 sre_length; - __u16 sre_error; - sctp_assoc_t sre_assoc_id; - __u8 sre_data[0]; -}; - - -/* - * 5.3.1.4 SCTP_SEND_FAILED - * - * If SCTP cannot deliver a message it may return the message as a - * notification. - */ -struct sctp_send_failed { - __u16 ssf_type; - __u16 ssf_flags; - __u32 ssf_length; - __u32 ssf_error; - struct sctp_sndrcvinfo ssf_info; - sctp_assoc_t ssf_assoc_id; - __u8 ssf_data[0]; -}; - -/* - * ssf_flags: 16 bits (unsigned integer) - * - * The flag value will take one of the following values - * - * SCTP_DATA_UNSENT - Indicates that the data was never put on - * the wire. - * - * SCTP_DATA_SENT - Indicates that the data was put on the wire. - * Note that this does not necessarily mean that the - * data was (or was not) successfully delivered. - */ -enum sctp_ssf_flags { - SCTP_DATA_UNSENT, - SCTP_DATA_SENT, -}; - -/* - * 5.3.1.5 SCTP_SHUTDOWN_EVENT - * - * When a peer sends a SHUTDOWN, SCTP delivers this notification to - * inform the application that it should cease sending data. - */ -struct sctp_shutdown_event { - __u16 sse_type; - __u16 sse_flags; - __u32 sse_length; - sctp_assoc_t sse_assoc_id; -}; - -/* - * 5.3.1.6 SCTP_ADAPTATION_INDICATION - * - * When a peer sends a Adaptation Layer Indication parameter , SCTP - * delivers this notification to inform the application - * that of the peers requested adaptation layer. - */ -struct sctp_adaptation_event { - __u16 sai_type; - __u16 sai_flags; - __u32 sai_length; - __u32 sai_adaptation_ind; - sctp_assoc_t sai_assoc_id; -}; - -/* - * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT - * - * When a receiver is engaged in a partial delivery of a - * message this notification will be used to indicate - * various events. - */ -struct sctp_pdapi_event { - __u16 pdapi_type; - __u16 pdapi_flags; - __u32 pdapi_length; - __u32 pdapi_indication; - sctp_assoc_t pdapi_assoc_id; -}; - -enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; - -struct sctp_authkey_event { - __u16 auth_type; - __u16 auth_flags; - __u32 auth_length; - __u16 auth_keynumber; - __u16 auth_altkeynumber; - __u32 auth_indication; - sctp_assoc_t auth_assoc_id; -}; - -enum { SCTP_AUTH_NEWKEY = 0, }; - - -/* - * Described in Section 7.3 - * Ancillary Data and Notification Interest Options - */ -struct sctp_event_subscribe { - __u8 sctp_data_io_event; - __u8 sctp_association_event; - __u8 sctp_address_event; - __u8 sctp_send_failure_event; - __u8 sctp_peer_error_event; - __u8 sctp_shutdown_event; - __u8 sctp_partial_delivery_event; - __u8 sctp_adaptation_layer_event; - __u8 sctp_authentication_event; -}; - -/* - * 5.3.1 SCTP Notification Structure - * - * The notification structure is defined as the union of all - * notification types. - * - */ -union sctp_notification { - struct { - __u16 sn_type; /* Notification type. 
*/ - __u16 sn_flags; - __u32 sn_length; - } sn_header; - struct sctp_assoc_change sn_assoc_change; - struct sctp_paddr_change sn_paddr_change; - struct sctp_remote_error sn_remote_error; - struct sctp_send_failed sn_send_failed; - struct sctp_shutdown_event sn_shutdown_event; - struct sctp_adaptation_event sn_adaptation_event; - struct sctp_pdapi_event sn_pdapi_event; - struct sctp_authkey_event sn_authkey_event; -}; - -/* Section 5.3.1 - * All standard values for sn_type flags are greater than 2^15. - * Values from 2^15 and down are reserved. - */ - -enum sctp_sn_type { - SCTP_SN_TYPE_BASE = (1<<15), - SCTP_ASSOC_CHANGE, - SCTP_PEER_ADDR_CHANGE, - SCTP_SEND_FAILED, - SCTP_REMOTE_ERROR, - SCTP_SHUTDOWN_EVENT, - SCTP_PARTIAL_DELIVERY_EVENT, - SCTP_ADAPTATION_INDICATION, - SCTP_AUTHENTICATION_INDICATION, -}; - -/* Notification error codes used to fill up the error fields in some - * notifications. - * SCTP_PEER_ADDRESS_CHAGE : spc_error - * SCTP_ASSOC_CHANGE : sac_error - * These names should be potentially included in the draft 04 of the SCTP - * sockets API specification. - */ -typedef enum sctp_sn_error { - SCTP_FAILED_THRESHOLD, - SCTP_RECEIVED_SACK, - SCTP_HEARTBEAT_SUCCESS, - SCTP_RESPONSE_TO_USER_REQ, - SCTP_INTERNAL_ERROR, - SCTP_SHUTDOWN_GUARD_EXPIRES, - SCTP_PEER_FAULTY, -} sctp_sn_error_t; - -/* - * 7.1.1 Retransmission Timeout Parameters (SCTP_RTOINFO) - * - * The protocol parameters used to initialize and bound retransmission - * timeout (RTO) are tunable. See [SCTP] for more information on how - * these parameters are used in RTO calculation. - */ -struct sctp_rtoinfo { - sctp_assoc_t srto_assoc_id; - __u32 srto_initial; - __u32 srto_max; - __u32 srto_min; -}; - -/* - * 7.1.2 Association Parameters (SCTP_ASSOCINFO) - * - * This option is used to both examine and set various association and - * endpoint parameters. - */ -struct sctp_assocparams { - sctp_assoc_t sasoc_assoc_id; - __u16 sasoc_asocmaxrxt; - __u16 sasoc_number_peer_destinations; - __u32 sasoc_peer_rwnd; - __u32 sasoc_local_rwnd; - __u32 sasoc_cookie_life; -}; - -/* - * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) - * - * Requests that the peer mark the enclosed address as the association - * primary. The enclosed address must be one of the association's - * locally bound addresses. The following structure is used to make a - * set primary request: - */ -struct sctp_setpeerprim { - sctp_assoc_t sspp_assoc_id; - struct sockaddr_storage sspp_addr; -} __attribute__((packed, aligned(4))); - -/* - * 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) - * - * Requests that the local SCTP stack use the enclosed peer address as - * the association primary. The enclosed address must be one of the - * association peer's addresses. The following structure is used to - * make a set peer primary request: - */ -struct sctp_prim { - sctp_assoc_t ssp_assoc_id; - struct sockaddr_storage ssp_addr; -} __attribute__((packed, aligned(4))); - -/* - * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) - * - * Requests that the local endpoint set the specified Adaptation Layer - * Indication parameter for all future INIT and INIT-ACK exchanges. 
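A short userspace sketch of the notification machinery above: subscribing with SCTP_EVENTS and recognising an SCTP_ASSOC_CHANGE notification in a received buffer. Headers and names assume lksctp-tools; error handling is omitted.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static void watch_assoc_changes(int fd)
{
	struct sctp_event_subscribe ev;

	memset(&ev, 0, sizeof(ev));
	ev.sctp_data_io_event = 1;	/* deliver SCTP_SNDRCV cmsgs */
	ev.sctp_association_event = 1;	/* deliver SCTP_ASSOC_CHANGE */
	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}

static int is_comm_up(const struct msghdr *msg, const char *buf)
{
	const union sctp_notification *sn =
		(const union sctp_notification *)buf;

	/* Notifications are flagged with MSG_NOTIFICATION in msg_flags. */
	return (msg->msg_flags & MSG_NOTIFICATION) &&
	       sn->sn_header.sn_type == SCTP_ASSOC_CHANGE &&
	       sn->sn_assoc_change.sac_state == SCTP_COMM_UP;
}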
- */ -struct sctp_setadaptation { - __u32 ssb_adaptation_ind; -}; - -/* - * 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) - * - * Applications can enable or disable heartbeats for any peer address - * of an association, modify an address's heartbeat interval, force a - * heartbeat to be sent immediately, and adjust the address's maximum - * number of retransmissions sent before an address is considered - * unreachable. The following structure is used to access and modify an - * address's parameters: - */ -enum sctp_spp_flags { - SPP_HB_ENABLE = 1<<0, /*Enable heartbeats*/ - SPP_HB_DISABLE = 1<<1, /*Disable heartbeats*/ - SPP_HB = SPP_HB_ENABLE | SPP_HB_DISABLE, - SPP_HB_DEMAND = 1<<2, /*Send heartbeat immediately*/ - SPP_PMTUD_ENABLE = 1<<3, /*Enable PMTU discovery*/ - SPP_PMTUD_DISABLE = 1<<4, /*Disable PMTU discovery*/ - SPP_PMTUD = SPP_PMTUD_ENABLE | SPP_PMTUD_DISABLE, - SPP_SACKDELAY_ENABLE = 1<<5, /*Enable SACK*/ - SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ - SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, - SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ -}; - -struct sctp_paddrparams { - sctp_assoc_t spp_assoc_id; - struct sockaddr_storage spp_address; - __u32 spp_hbinterval; - __u16 spp_pathmaxrxt; - __u32 spp_pathmtu; - __u32 spp_sackdelay; - __u32 spp_flags; -} __attribute__((packed, aligned(4))); - -/* - * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) - * - * This set option adds a chunk type that the user is requesting to be - * received only in an authenticated way. Changes to the list of chunks - * will only effect future associations on the socket. - */ -struct sctp_authchunk { - __u8 sauth_chunk; -}; - -/* - * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) - * - * This option gets or sets the list of HMAC algorithms that the local - * endpoint requires the peer to use. -*/ -struct sctp_hmacalgo { - __u32 shmac_num_idents; - __u16 shmac_idents[]; -}; - -/* - * 7.1.20. Set a shared key (SCTP_AUTH_KEY) - * - * This option will set a shared secret key which is used to build an - * association shared key. - */ -struct sctp_authkey { - sctp_assoc_t sca_assoc_id; - __u16 sca_keynumber; - __u16 sca_keylength; - __u8 sca_key[]; -}; - -/* - * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) - * - * This option will get or set the active shared key to be used to build - * the association shared key. - */ - -struct sctp_authkeyid { - sctp_assoc_t scact_assoc_id; - __u16 scact_keynumber; -}; - - -/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) - * - * This options will get or set the delayed ack timer. The time is set - * in milliseconds. If the assoc_id is 0, then this sets or gets the - * endpoints default delayed ack timer value. If the assoc_id field is - * non-zero, then the set or get effects the specified association. - */ -struct sctp_assoc_value { - sctp_assoc_t assoc_id; - uint32_t assoc_value; -}; - -/* - * 7.2.2 Peer Address Information - * - * Applications can retrieve information about a specific peer address - * of an association, including its reachability state, congestion - * window, and retransmission timer values. This information is - * read-only. 
The following structure is used to access this - * information: - */ -struct sctp_paddrinfo { - sctp_assoc_t spinfo_assoc_id; - struct sockaddr_storage spinfo_address; - __s32 spinfo_state; - __u32 spinfo_cwnd; - __u32 spinfo_srtt; - __u32 spinfo_rto; - __u32 spinfo_mtu; -} __attribute__((packed, aligned(4))); - -/* Peer addresses's state. */ -/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x] - * calls. - * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters. - * Not yet confirmed by a heartbeat and not available for data - * transfers. - * ACTIVE : Peer address confirmed, active and available for data transfers. - * INACTIVE: Peer address inactive and not available for data transfers. - */ -enum sctp_spinfo_state { - SCTP_INACTIVE, - SCTP_ACTIVE, - SCTP_UNCONFIRMED, - SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ -}; - -/* - * 7.2.1 Association Status (SCTP_STATUS) - * - * Applications can retrieve current status information about an - * association, including association state, peer receiver window size, - * number of unacked data chunks, and number of data chunks pending - * receipt. This information is read-only. The following structure is - * used to access this information: - */ -struct sctp_status { - sctp_assoc_t sstat_assoc_id; - __s32 sstat_state; - __u32 sstat_rwnd; - __u16 sstat_unackdata; - __u16 sstat_penddata; - __u16 sstat_instrms; - __u16 sstat_outstrms; - __u32 sstat_fragmentation_point; - struct sctp_paddrinfo sstat_primary; -}; - -/* - * 7.2.3. Get the list of chunks the peer requires to be authenticated - * (SCTP_PEER_AUTH_CHUNKS) - * - * This option gets a list of chunks for a specified association that - * the peer requires to be received authenticated only. - */ -struct sctp_authchunks { - sctp_assoc_t gauth_assoc_id; - __u32 gauth_number_of_chunks; - uint8_t gauth_chunks[]; -}; - -/* - * 8.3, 8.5 get all peer/local addresses in an association. - * This parameter struct is used by SCTP_GET_PEER_ADDRS and - * SCTP_GET_LOCAL_ADDRS socket options used internally to implement - * sctp_getpaddrs() and sctp_getladdrs() API. - */ -struct sctp_getaddrs_old { - sctp_assoc_t assoc_id; - int addr_num; - struct sockaddr __user *addrs; -}; -struct sctp_getaddrs { - sctp_assoc_t assoc_id; /*input*/ - __u32 addr_num; /*output*/ - __u8 addrs[0]; /*output, variable size*/ -}; - -/* These are bit fields for msghdr->msg_flags. See section 5.1. */ -/* On user space Linux, these live in <bits/socket.h> as an enum. */ -enum sctp_msg_flags { - MSG_NOTIFICATION = 0x8000, -#define MSG_NOTIFICATION MSG_NOTIFICATION -}; - -/* - * 8.1 sctp_bindx() - * - * The flags parameter is formed from the bitwise OR of zero or more of the - * following currently defined flags: - */ -#define SCTP_BINDX_ADD_ADDR 0x01 -#define SCTP_BINDX_REM_ADDR 0x02 - -/* This is the structure that is passed as an argument(optval) to - * getsockopt(SCTP_SOCKOPT_PEELOFF). 
- */ -typedef struct { - sctp_assoc_t associd; - int sd; -} sctp_peeloff_arg_t; - -#endif /* __net_sctp_user_h__ */ diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h new file mode 100644 index 00000000000..3f36d45b714 --- /dev/null +++ b/include/net/secure_seq.h @@ -0,0 +1,18 @@ +#ifndef _NET_SECURE_SEQ +#define _NET_SECURE_SEQ + +#include <linux/types.h> + +u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); +u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, + __be16 dport); +__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport); +__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, + __be16 sport, __be16 dport); +u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport); +u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, + __be16 sport, __be16 dport); + +#endif /* _NET_SECURE_SEQ */ diff --git a/include/net/snmp.h b/include/net/snmp.h index ce2f4850751..f1f27fdbb0d 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -14,8 +14,6 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * $Id: snmp.h,v 1.19 2001/06/14 13:40:46 davem Exp $ - * */ #ifndef _SNMP_H @@ -34,7 +32,7 @@ * - name of entries. */ struct snmp_mib { - char *name; + const char *name; int entry; }; @@ -49,68 +47,62 @@ struct snmp_mib { } /* - * We use all unsigned longs. Linux will soon be so reliable that even - * these will rapidly get too small 8-). Seriously consider the IpInReceives - * count on the 20Gb/s + networks people expect in a few years time! + * We use unsigned longs for most mibs but u64 for ipstats. */ - -/* - * The rule for padding: - * Best is power of two because then the right structure can be found by a - * simple shift. The structure should be always cache line aligned. - * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add - * instructions to emulate multiply in case it is not power-of-two. - * Currently n is always <=3 for all sizes so simple cache line alignment - * is enough. 
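A hedged sketch of how the peeloff argument structure above is used: getsockopt(SCTP_SOCKOPT_PEELOFF) branches one association off a one-to-many socket into its own descriptor, which is the mechanism the library's sctp_peeloff() wrapper is built on. It assumes the structure is visible to the application through the installed sctp headers, which may not hold for every header version.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int peel_off(int fd, sctp_assoc_t associd)
{
	sctp_peeloff_arg_t peeloff;
	socklen_t len = sizeof(peeloff);

	peeloff.associd = associd;
	peeloff.sd = 0;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
		       &peeloff, &len) < 0)
		return -1;

	return peeloff.sd;	/* socket bound to just this association */
}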
- * - * The best solution would be a global CPU local area , especially on 64 - * and 128byte cacheline machine it makes a *lot* of sense -AK - */ - -#define __SNMP_MIB_ALIGN__ ____cacheline_aligned +#include <linux/u64_stats_sync.h> /* IPstats */ #define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX struct ipstats_mib { - unsigned long mibs[IPSTATS_MIB_MAX]; -} __SNMP_MIB_ALIGN__; + /* mibs[] must be first field of struct ipstats_mib */ + u64 mibs[IPSTATS_MIB_MAX]; + struct u64_stats_sync syncp; +}; /* ICMP */ -#define ICMP_MIB_DUMMY __ICMP_MIB_MAX -#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1) - +#define ICMP_MIB_MAX __ICMP_MIB_MAX struct icmp_mib { unsigned long mibs[ICMP_MIB_MAX]; -} __SNMP_MIB_ALIGN__; +}; #define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX struct icmpmsg_mib { - unsigned long mibs[ICMPMSG_MIB_MAX]; -} __SNMP_MIB_ALIGN__; + atomic_long_t mibs[ICMPMSG_MIB_MAX]; +}; /* ICMP6 (IPv6-ICMP) */ #define ICMP6_MIB_MAX __ICMP6_MIB_MAX +/* per network ns counters */ struct icmpv6_mib { unsigned long mibs[ICMP6_MIB_MAX]; -} __SNMP_MIB_ALIGN__; +}; +/* per device counters, (shared on all cpus) */ +struct icmpv6_mib_device { + atomic_long_t mibs[ICMP6_MIB_MAX]; +}; #define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX +/* per network ns counters */ struct icmpv6msg_mib { - unsigned long mibs[ICMP6MSG_MIB_MAX]; -} __SNMP_MIB_ALIGN__; + atomic_long_t mibs[ICMP6MSG_MIB_MAX]; +}; +/* per device counters, (shared on all cpus) */ +struct icmpv6msg_mib_device { + atomic_long_t mibs[ICMP6MSG_MIB_MAX]; +}; /* TCP */ #define TCP_MIB_MAX __TCP_MIB_MAX struct tcp_mib { unsigned long mibs[TCP_MIB_MAX]; -} __SNMP_MIB_ALIGN__; +}; /* UDP */ #define UDP_MIB_MAX __UDP_MIB_MAX struct udp_mib { unsigned long mibs[UDP_MIB_MAX]; -} __SNMP_MIB_ALIGN__; +}; /* Linux */ #define LINUX_MIB_MAX __LINUX_MIB_MAX @@ -124,43 +116,102 @@ struct linux_xfrm_mib { unsigned long mibs[LINUX_MIB_XFRMMAX]; }; -/* - * FIXME: On x86 and some other CPUs the split into user and softirq parts - * is not needed because addl $1,memory is atomic against interrupts (but - * atomic_inc would be overkill because of the lock cycles). Wants new - * nonlocked_atomic_inc() primitives -AK - */ #define DEFINE_SNMP_STAT(type, name) \ - __typeof__(type) *name[2] + __typeof__(type) __percpu *name +#define DEFINE_SNMP_STAT_ATOMIC(type, name) \ + __typeof__(type) *name #define DECLARE_SNMP_STAT(type, name) \ - extern __typeof__(type) *name[2] + extern __typeof__(type) __percpu *name + +#define SNMP_INC_STATS_BH(mib, field) \ + __this_cpu_inc(mib->mibs[field]) + +#define SNMP_INC_STATS_USER(mib, field) \ + this_cpu_inc(mib->mibs[field]) + +#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \ + atomic_long_inc(&mib->mibs[field]) -#define SNMP_STAT_BHPTR(name) (name[0]) -#define SNMP_STAT_USRPTR(name) (name[1]) +#define SNMP_INC_STATS(mib, field) \ + this_cpu_inc(mib->mibs[field]) -#define SNMP_INC_STATS_BH(mib, field) \ - (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++) -#define SNMP_INC_STATS_USER(mib, field) \ +#define SNMP_DEC_STATS(mib, field) \ + this_cpu_dec(mib->mibs[field]) + +#define SNMP_ADD_STATS_BH(mib, field, addend) \ + __this_cpu_add(mib->mibs[field], addend) + +#define SNMP_ADD_STATS_USER(mib, field, addend) \ + this_cpu_add(mib->mibs[field], addend) + +#define SNMP_ADD_STATS(mib, field, addend) \ + this_cpu_add(mib->mibs[field], addend) +/* + * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr" + * to make @ptr a non-percpu pointer. 
+ */ +#define SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ - per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \ - put_cpu(); \ + __typeof__(*mib->mibs) *ptr = mib->mibs; \ + this_cpu_inc(ptr[basefield##PKTS]); \ + this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) -#define SNMP_INC_STATS(mib, field) \ +#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ do { \ - per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \ - put_cpu(); \ + __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __this_cpu_inc(ptr[basefield##PKTS]); \ + __this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) -#define SNMP_DEC_STATS(mib, field) \ - do { \ - per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \ - put_cpu(); \ + + +#if BITS_PER_LONG==32 + +#define SNMP_ADD_STATS64_BH(mib, field, addend) \ + do { \ + __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[field] += addend; \ + u64_stats_update_end(&ptr->syncp); \ } while (0) -#define SNMP_ADD_STATS_BH(mib, field, addend) \ - (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend) -#define SNMP_ADD_STATS_USER(mib, field, addend) \ - do { \ - per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \ - put_cpu(); \ + +#define SNMP_ADD_STATS64_USER(mib, field, addend) \ + do { \ + local_bh_disable(); \ + SNMP_ADD_STATS64_BH(mib, field, addend); \ + local_bh_enable(); \ } while (0) +#define SNMP_ADD_STATS64(mib, field, addend) \ + SNMP_ADD_STATS64_USER(mib, field, addend) + +#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1) +#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1) +#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) +#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ + do { \ + __typeof__(*mib) *ptr; \ + ptr = __this_cpu_ptr(mib); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[basefield##PKTS]++; \ + ptr->mibs[basefield##OCTETS] += addend; \ + u64_stats_update_end(&ptr->syncp); \ + } while (0) +#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ + do { \ + local_bh_disable(); \ + SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \ + local_bh_enable(); \ + } while (0) +#else +#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field) +#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field) +#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field) +#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field) +#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend) +#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend) +#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend) +#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend) +#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend) +#endif + #endif diff --git a/include/net/sock.h b/include/net/sock.h index fd987608765..15635074570 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -40,24 +40,50 @@ #ifndef _SOCK_H #define _SOCK_H +#include <linux/hardirq.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/list_nulls.h> #include <linux/timer.h> #include <linux/cache.h> -#include <linux/module.h> +#include <linux/bitops.h> #include <linux/lockdep.h> #include <linux/netdevice.h> -#include <linux/pcounter.h> #include <linux/skbuff.h> /* struct sk_buff */ #include <linux/mm.h> #include <linux/security.h> 
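To make the macro rework above concrete, a hedged kernel-context sketch of the usual caller pattern (the field name assumes the per-netns MIB layout used elsewhere in the stack): packet and octet counters are bumped together, and on 32-bit kernels the u64_stats_sync seqcount embedded in struct ipstats_mib lets readers assemble a consistent 64-bit value without atomics, while on 64-bit kernels the macros collapse to plain per-cpu adds.

/* Hedged sketch, kernel context assumed. */
static inline void example_account_rx_octets(struct net *net,
					     unsigned int octets)
{
	/* Softirq path: the _BH variant relies on this CPU already
	 * being protected against reentrancy, so no extra locking.
	 */
	SNMP_UPD_PO_STATS64_BH(net->mib.ip_statistics,
			       IPSTATS_MIB_IN, octets);
}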
+#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/memcontrol.h> +#include <linux/res_counter.h> +#include <linux/static_key.h> +#include <linux/aio.h> +#include <linux/sched.h> #include <linux/filter.h> +#include <linux/rculist_nulls.h> +#include <linux/poll.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <net/dst.h> #include <net/checksum.h> +struct cgroup; +struct cgroup_subsys; +#ifdef CONFIG_NET +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss); +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg); +#else +static inline +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) +{ + return 0; +} +static inline +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) +{ +} +#endif /* * This structure really needs to be cleaned up. * Most of it is for TCP, and not used by any of @@ -70,7 +96,11 @@ #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ printk(KERN_DEBUG msg); } while (0) #else -#define SOCK_DEBUG(sk, msg...) do { } while (0) +/* Validate arguments and do nothing */ +static inline __printf(2, 3) +void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) +{ +} #endif /* This is the per-socket lock. The spinlock provides a synchronization @@ -96,35 +126,96 @@ struct sock; struct proto; struct net; +typedef __u32 __bitwise __portpair; +typedef __u64 __bitwise __addrpair; + /** * struct sock_common - minimal network layer representation of sockets + * @skc_daddr: Foreign IPv4 addr + * @skc_rcv_saddr: Bound local IPv4 addr + * @skc_hash: hash value used with various protocol lookup tables + * @skc_u16hashes: two u16 hash values used by UDP lookup tables + * @skc_dport: placeholder for inet_dport/tw_dport + * @skc_num: placeholder for inet_num/tw_num * @skc_family: network address family * @skc_state: Connection state * @skc_reuse: %SO_REUSEADDR setting + * @skc_reuseport: %SO_REUSEPORT setting * @skc_bound_dev_if: bound device index if != 0 - * @skc_node: main hash linkage for various protocol lookup tables * @skc_bind_node: bind hash linkage for various protocol lookup tables - * @skc_refcnt: reference count - * @skc_hash: hash value used with various protocol lookup tables + * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol * @skc_prot: protocol handlers inside a network family * @skc_net: reference to the network namespace of this socket + * @skc_node: main hash linkage for various protocol lookup tables + * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol + * @skc_tx_queue_mapping: tx queue number for this connection + * @skc_refcnt: reference count * * This is the minimal network layer representation of sockets, the header * for struct sock and struct inet_timewait_sock. 
*/ struct sock_common { + /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned + * address on 64bit arches : cf INET_MATCH() + */ + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + /* skc_dport && skc_num must be grouped as well */ + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + unsigned short skc_family; volatile unsigned char skc_state; - unsigned char skc_reuse; + unsigned char skc_reuse:4; + unsigned char skc_reuseport:4; int skc_bound_dev_if; - struct hlist_node skc_node; - struct hlist_node skc_bind_node; - atomic_t skc_refcnt; - unsigned int skc_hash; + union { + struct hlist_node skc_bind_node; + struct hlist_nulls_node skc_portaddr_node; + }; struct proto *skc_prot; +#ifdef CONFIG_NET_NS struct net *skc_net; +#endif + +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; +#endif + + /* + * fields between dontcopy_begin/dontcopy_end + * are not copied in sock_copy() + */ + /* private: */ + int skc_dontcopy_begin[0]; + /* public: */ + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + int skc_tx_queue_mapping; + atomic_t skc_refcnt; + /* private: */ + int skc_dontcopy_end[0]; + /* public: */ }; +struct cg_proto; /** * struct sock - network layer representation of sockets * @__sk_common: shared layout with inet_timewait_sock @@ -132,11 +223,11 @@ struct sock_common { * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings * @sk_lock: synchronizer * @sk_rcvbuf: size of receive buffer in bytes - * @sk_sleep: sock wait queue + * @sk_wq: sock wait queue and async head + * @sk_rx_dst: receive input route used by early demux * @sk_dst_cache: destination cache * @sk_dst_lock: destination cache lock * @sk_policy: flow policy - * @sk_rmem_alloc: receive queue bytes committed * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_write_queue: Packet sending queue @@ -144,13 +235,21 @@ struct sock_common { * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward + * @sk_napi_id: id of the last napi context to receive data for sk + * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode + * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) + * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_sndbuf: size of send buffer in bytes * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, - * %SO_OOBINLINE settings - * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets + * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings + * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets + * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) + * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) * @sk_gso_type: GSO type (e.g. 
%SKB_GSO_TCPV4) + * @sk_gso_max_size: Maximum GSO segment size to build + * @sk_gso_max_segs: Maximum number of GSO segments * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct @@ -160,27 +259,32 @@ struct sock_common { * @sk_err: last error * @sk_err_soft: errors that don't cause failure but are the cause of a * persistent failure not just 'timed out' - * @sk_drops: raw drops counter + * @sk_drops: raw/udp drops counter * @sk_ack_backlog: current listen backlog * @sk_max_ack_backlog: listen backlog set in listen() * @sk_priority: %SO_PRIORITY setting + * @sk_cgrp_prioidx: socket group's priority map index * @sk_type: socket type (%SOCK_STREAM, etc) * @sk_protocol: which protocol this socket belongs in this network family - * @sk_peercred: %SO_PEERCRED setting + * @sk_peer_pid: &struct pid for this socket's peer + * @sk_peer_cred: %SO_PEERCRED setting * @sk_rcvlowat: %SO_RCVLOWAT setting * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting + * @sk_rxhash: flow hash received from netif layer * @sk_filter: socket filtering instructions * @sk_protinfo: private area, net family specific, when not using slab * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_socket: Identd and reporting IO signals * @sk_user_data: RPC layer private data - * @sk_sndmsg_page: cached page for sendmsg - * @sk_sndmsg_off: cached offset for sendmsg + * @sk_frag: cached page frag + * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules * @sk_mark: generic packet mark + * @sk_classid: this socket's cgroup classid + * @sk_cgrp: this socket's cgroup-specific proto data * @sk_write_pending: a write to stream socket waits to start * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed @@ -195,88 +299,183 @@ struct sock { * don't add nothing before this first member (__sk_common) --acme */ struct sock_common __sk_common; +#define sk_node __sk_common.skc_node +#define sk_nulls_node __sk_common.skc_nulls_node +#define sk_refcnt __sk_common.skc_refcnt +#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping + +#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin +#define sk_dontcopy_end __sk_common.skc_dontcopy_end +#define sk_hash __sk_common.skc_hash +#define sk_portpair __sk_common.skc_portpair +#define sk_num __sk_common.skc_num +#define sk_dport __sk_common.skc_dport +#define sk_addrpair __sk_common.skc_addrpair +#define sk_daddr __sk_common.skc_daddr +#define sk_rcv_saddr __sk_common.skc_rcv_saddr #define sk_family __sk_common.skc_family #define sk_state __sk_common.skc_state #define sk_reuse __sk_common.skc_reuse +#define sk_reuseport __sk_common.skc_reuseport #define sk_bound_dev_if __sk_common.skc_bound_dev_if -#define sk_node __sk_common.skc_node #define sk_bind_node __sk_common.skc_bind_node -#define sk_refcnt __sk_common.skc_refcnt -#define sk_hash __sk_common.skc_hash #define sk_prot __sk_common.skc_prot #define sk_net __sk_common.skc_net - unsigned char sk_shutdown : 2, - sk_no_check : 2, - sk_userlocks : 4; - unsigned char sk_protocol; - unsigned short sk_type; - int sk_rcvbuf; +#define sk_v6_daddr __sk_common.skc_v6_daddr +#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr + socket_lock_t sk_lock; + struct sk_buff_head sk_receive_queue; /* * The backlog queue is 
special, it is always used with * the per-socket spinlock held and requires low latency * access. Therefore we special case it's implementation. + * Note : rmem_alloc is in this structure to fill a hole + * on 64bit arches, not because its logically part of + * backlog. */ struct { - struct sk_buff *head; - struct sk_buff *tail; + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; } sk_backlog; - wait_queue_head_t *sk_sleep; - struct dst_entry *sk_dst_cache; +#define sk_rmem_alloc sk_backlog.rmem_alloc + int sk_forward_alloc; +#ifdef CONFIG_RPS + __u32 sk_rxhash; +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int sk_napi_id; + unsigned int sk_ll_usec; +#endif + atomic_t sk_drops; + int sk_rcvbuf; + + struct sk_filter __rcu *sk_filter; + struct socket_wq __rcu *sk_wq; + +#ifdef CONFIG_NET_DMA + struct sk_buff_head sk_async_wait_queue; +#endif + +#ifdef CONFIG_XFRM struct xfrm_policy *sk_policy[2]; - rwlock_t sk_dst_lock; - atomic_t sk_rmem_alloc; +#endif + unsigned long sk_flags; + struct dst_entry *sk_rx_dst; + struct dst_entry __rcu *sk_dst_cache; + spinlock_t sk_dst_lock; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; - struct sk_buff_head sk_receive_queue; struct sk_buff_head sk_write_queue; - struct sk_buff_head sk_async_wait_queue; + kmemcheck_bitfield_begin(flags); + unsigned int sk_shutdown : 2, + sk_no_check_tx : 1, + sk_no_check_rx : 1, + sk_userlocks : 4, + sk_protocol : 8, + sk_type : 16; + kmemcheck_bitfield_end(flags); int sk_wmem_queued; - int sk_forward_alloc; gfp_t sk_allocation; - int sk_route_caps; + u32 sk_pacing_rate; /* bytes per second */ + u32 sk_max_pacing_rate; + netdev_features_t sk_route_caps; + netdev_features_t sk_route_nocaps; int sk_gso_type; + unsigned int sk_gso_max_size; + u16 sk_gso_max_segs; int sk_rcvlowat; - unsigned long sk_flags; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err, sk_err_soft; - atomic_t sk_drops; unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; - struct ucred sk_peercred; +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) + __u32 sk_cgrp_prioidx; +#endif + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; - struct sk_filter *sk_filter; void *sk_protinfo; struct timer_list sk_timer; ktime_t sk_stamp; struct socket *sk_socket; void *sk_user_data; - struct page *sk_sndmsg_page; + struct page_frag sk_frag; struct sk_buff *sk_send_head; - __u32 sk_sndmsg_off; + __s32 sk_peek_off; int sk_write_pending; +#ifdef CONFIG_SECURITY void *sk_security; +#endif __u32 sk_mark; - /* XXX 4 bytes hole on 64 bit */ + u32 sk_classid; + struct cg_proto *sk_cgrp; void (*sk_state_change)(struct sock *sk); - void (*sk_data_ready)(struct sock *sk, int bytes); + void (*sk_data_ready)(struct sock *sk); void (*sk_write_space)(struct sock *sk); void (*sk_error_report)(struct sock *sk); - int (*sk_backlog_rcv)(struct sock *sk, - struct sk_buff *skb); + int (*sk_backlog_rcv)(struct sock *sk, + struct sk_buff *skb); void (*sk_destruct)(struct sock *sk); }; +#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) + +#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) +#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) + +/* + * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK + * or not whether his port will be reused by someone else. 
SK_FORCE_REUSE + * on a socket means that the socket will reuse everybody else's port + * without looking at the other's sk_reuse value. + */ + +#define SK_NO_REUSE 0 +#define SK_CAN_REUSE 1 +#define SK_FORCE_REUSE 2 + +static inline int sk_peek_offset(struct sock *sk, int flags) +{ + if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) + return sk->sk_peek_off; + else + return 0; +} + +static inline void sk_peek_offset_bwd(struct sock *sk, int val) +{ + if (sk->sk_peek_off >= 0) { + if (sk->sk_peek_off >= val) + sk->sk_peek_off -= val; + else + sk->sk_peek_off = 0; + } +} + +static inline void sk_peek_offset_fwd(struct sock *sk, int val) +{ + if (sk->sk_peek_off >= 0) + sk->sk_peek_off += val; +} + /* * Hashed lists helper routines */ +static inline struct sock *sk_entry(const struct hlist_node *node) +{ + return hlist_entry(node, struct sock, sk_node); +} + static inline struct sock *__sk_head(const struct hlist_head *head) { return hlist_entry(head->first, struct sock, sk_node); @@ -287,40 +486,64 @@ static inline struct sock *sk_head(const struct hlist_head *head) return hlist_empty(head) ? NULL : __sk_head(head); } +static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head) +{ + return hlist_nulls_entry(head->first, struct sock, sk_nulls_node); +} + +static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head) +{ + return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head); +} + static inline struct sock *sk_next(const struct sock *sk) { return sk->sk_node.next ? hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; } -static inline int sk_unhashed(const struct sock *sk) +static inline struct sock *sk_nulls_next(const struct sock *sk) +{ + return (!is_a_nulls(sk->sk_nulls_node.next)) ? + hlist_nulls_entry(sk->sk_nulls_node.next, + struct sock, sk_nulls_node) : + NULL; +} + +static inline bool sk_unhashed(const struct sock *sk) { return hlist_unhashed(&sk->sk_node); } -static inline int sk_hashed(const struct sock *sk) +static inline bool sk_hashed(const struct sock *sk) { return !sk_unhashed(sk); } -static __inline__ void sk_node_init(struct hlist_node *node) +static inline void sk_node_init(struct hlist_node *node) { node->pprev = NULL; } -static __inline__ void __sk_del_node(struct sock *sk) +static inline void sk_nulls_node_init(struct hlist_nulls_node *node) +{ + node->pprev = NULL; +} + +static inline void __sk_del_node(struct sock *sk) { __hlist_del(&sk->sk_node); } -static __inline__ int __sk_del_node_init(struct sock *sk) +/* NB: equivalent to hlist_del_init_rcu */ +static inline bool __sk_del_node_init(struct sock *sk) { if (sk_hashed(sk)) { __sk_del_node(sk); sk_node_init(&sk->sk_node); - return 1; + return true; } - return 0; + return false; } /* Grab socket reference count. 
This operation is valid only @@ -342,9 +565,31 @@ static inline void __sock_put(struct sock *sk) atomic_dec(&sk->sk_refcnt); } -static __inline__ int sk_del_node_init(struct sock *sk) +static inline bool sk_del_node_init(struct sock *sk) +{ + bool rc = __sk_del_node_init(sk); + + if (rc) { + /* paranoid for a while -acme */ + WARN_ON(atomic_read(&sk->sk_refcnt) == 1); + __sock_put(sk); + } + return rc; +} +#define sk_del_node_init_rcu(sk) sk_del_node_init(sk) + +static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) +{ + if (sk_hashed(sk)) { + hlist_nulls_del_init_rcu(&sk->sk_nulls_node); + return true; + } + return false; +} + +static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) { - int rc = __sk_del_node_init(sk); + bool rc = __sk_nulls_del_node_init_rcu(sk); if (rc) { /* paranoid for a while -acme */ @@ -354,40 +599,71 @@ static __inline__ int sk_del_node_init(struct sock *sk) return rc; } -static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) +static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_node, list); } -static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) +static inline void sk_add_node(struct sock *sk, struct hlist_head *list) { sock_hold(sk); __sk_add_node(sk, list); } -static __inline__ void __sk_del_bind_node(struct sock *sk) +static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) +{ + sock_hold(sk); + hlist_add_head_rcu(&sk->sk_node, list); +} + +static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); +} + +static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + sock_hold(sk); + __sk_nulls_add_node_rcu(sk, list); +} + +static inline void __sk_del_bind_node(struct sock *sk) { __hlist_del(&sk->sk_bind_node); } -static __inline__ void sk_add_bind_node(struct sock *sk, +static inline void sk_add_bind_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_bind_node, list); } -#define sk_for_each(__sk, node, list) \ - hlist_for_each_entry(__sk, node, list, sk_node) -#define sk_for_each_from(__sk, node) \ - if (__sk && ({ node = &(__sk)->sk_node; 1; })) \ - hlist_for_each_entry_from(__sk, node, sk_node) -#define sk_for_each_continue(__sk, node) \ - if (__sk && ({ node = &(__sk)->sk_node; 1; })) \ - hlist_for_each_entry_continue(__sk, node, sk_node) -#define sk_for_each_safe(__sk, node, tmp, list) \ - hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node) -#define sk_for_each_bound(__sk, node, list) \ - hlist_for_each_entry(__sk, node, list, sk_bind_node) +#define sk_for_each(__sk, list) \ + hlist_for_each_entry(__sk, list, sk_node) +#define sk_for_each_rcu(__sk, list) \ + hlist_for_each_entry_rcu(__sk, list, sk_node) +#define sk_nulls_for_each(__sk, node, list) \ + hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) +#define sk_nulls_for_each_rcu(__sk, node, list) \ + hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) +#define sk_for_each_from(__sk) \ + hlist_for_each_entry_from(__sk, sk_node) +#define sk_nulls_for_each_from(__sk, node) \ + if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ + hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) +#define sk_for_each_safe(__sk, tmp, list) \ + hlist_for_each_entry_safe(__sk, tmp, list, sk_node) +#define sk_for_each_bound(__sk, list) \ + hlist_for_each_entry(__sk, list, sk_bind_node) + +static inline struct 
user_namespace *sk_user_ns(struct sock *sk) +{ + /* Careful only use this in a context where these parameters + * can not change and must all be valid, such as recvmsg from + * userspace. + */ + return sk->sk_socket->file->f_cred->user_ns; +} /* Sock flags */ enum sock_flags { @@ -406,6 +682,24 @@ enum sock_flags { SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ + SOCK_MEMALLOC, /* VM depends on this socket for swapping */ + SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */ + SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */ + SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */ + SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */ + SOCK_TIMESTAMPING_SOFTWARE, /* %SOF_TIMESTAMPING_SOFTWARE */ + SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */ + SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */ + SOCK_FASYNC, /* fasync() active */ + SOCK_RXQ_OVFL, + SOCK_ZEROCOPY, /* buffers from userspace */ + SOCK_WIFI_STATUS, /* push wifi status to userspace */ + SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS. + * Will use last 4 bytes of packet sent from + * user-space instead. + */ + SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ + SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ }; static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) @@ -423,11 +717,31 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) __clear_bit(flag, &sk->sk_flags); } -static inline int sock_flag(struct sock *sk, enum sock_flags flag) +static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); } +#ifdef CONFIG_NET +extern struct static_key memalloc_socks; +static inline int sk_memalloc_socks(void) +{ + return static_key_false(&memalloc_socks); +} +#else + +static inline int sk_memalloc_socks(void) +{ + return 0; +} + +#endif + +static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask) +{ + return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); +} + static inline void sk_acceptq_removed(struct sock *sk) { sk->sk_ack_backlog--; @@ -438,7 +752,7 @@ static inline void sk_acceptq_added(struct sock *sk) sk->sk_ack_backlog++; } -static inline int sk_acceptq_is_full(struct sock *sk) +static inline bool sk_acceptq_is_full(const struct sock *sk) { return sk->sk_ack_backlog > sk->sk_max_ack_backlog; } @@ -446,33 +760,123 @@ static inline int sk_acceptq_is_full(struct sock *sk) /* * Compute minimal free write space needed to queue new packets. 
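To illustrate the RCU/nulls iterators added above, here is a hedged sketch of a lockless hash-chain lookup. The chain head, hash match and `slot` value are hypothetical, and the restart-on-wrong-nulls check follows the usual SLAB_DESTROY_BY_RCU convention.

#include <linux/rculist_nulls.h>
#include <net/sock.h>

/* Sketch: walk one nulls-terminated chain under RCU (caller holds
 * rcu_read_lock()) and restart if the entry was moved to another slot
 * while we walked it.
 */
static struct sock *example_lookup_chain(struct hlist_nulls_head *chain,
					 unsigned int hash, unsigned int slot)
{
	struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, chain) {
		if (sk->sk_hash == hash)
			return sk;	/* caller still has to take its own reference */
	}
	/* the nulls value encodes the slot; a mismatch means we drifted chains */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}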
*/ -static inline int sk_stream_min_wspace(struct sock *sk) +static inline int sk_stream_min_wspace(const struct sock *sk) { return sk->sk_wmem_queued >> 1; } -static inline int sk_stream_wspace(struct sock *sk) +static inline int sk_stream_wspace(const struct sock *sk) { return sk->sk_sndbuf - sk->sk_wmem_queued; } -extern void sk_stream_write_space(struct sock *sk); +void sk_stream_write_space(struct sock *sk); + +/* OOB backlog add */ +static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) +{ + /* dont let skb dst not refcounted, we are going to leave rcu lock */ + skb_dst_force(skb); + + if (!sk->sk_backlog.tail) + sk->sk_backlog.head = skb; + else + sk->sk_backlog.tail->next = skb; -static inline int sk_stream_memory_free(struct sock *sk) + sk->sk_backlog.tail = skb; + skb->next = NULL; +} + +/* + * Take into account size of receive queue and backlog queue + * Do not take into account this skb truesize, + * to allow even a single big packet to come. + */ +static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb, + unsigned int limit) { - return sk->sk_wmem_queued < sk->sk_sndbuf; + unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); + + return qsize > limit; } /* The per-socket spinlock must be held here. */ -static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) +static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, + unsigned int limit) { - if (!sk->sk_backlog.tail) { - sk->sk_backlog.head = sk->sk_backlog.tail = skb; - } else { - sk->sk_backlog.tail->next = skb; - sk->sk_backlog.tail = skb; + if (sk_rcvqueues_full(sk, skb, limit)) + return -ENOBUFS; + + __sk_add_backlog(sk, skb); + sk->sk_backlog.len += skb->truesize; + return 0; +} + +int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); + +static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) +{ + if (sk_memalloc_socks() && skb_pfmemalloc(skb)) + return __sk_backlog_rcv(sk, skb); + + return sk->sk_backlog_rcv(sk, skb); +} + +static inline void sock_rps_record_flow_hash(__u32 hash) +{ +#ifdef CONFIG_RPS + struct rps_sock_flow_table *sock_flow_table; + + rcu_read_lock(); + sock_flow_table = rcu_dereference(rps_sock_flow_table); + rps_record_sock_flow(sock_flow_table, hash); + rcu_read_unlock(); +#endif +} + +static inline void sock_rps_reset_flow_hash(__u32 hash) +{ +#ifdef CONFIG_RPS + struct rps_sock_flow_table *sock_flow_table; + + rcu_read_lock(); + sock_flow_table = rcu_dereference(rps_sock_flow_table); + rps_reset_sock_flow(sock_flow_table, hash); + rcu_read_unlock(); +#endif +} + +static inline void sock_rps_record_flow(const struct sock *sk) +{ +#ifdef CONFIG_RPS + sock_rps_record_flow_hash(sk->sk_rxhash); +#endif +} + +static inline void sock_rps_reset_flow(const struct sock *sk) +{ +#ifdef CONFIG_RPS + sock_rps_reset_flow_hash(sk->sk_rxhash); +#endif +} + +static inline void sock_rps_save_rxhash(struct sock *sk, + const struct sk_buff *skb) +{ +#ifdef CONFIG_RPS + if (unlikely(sk->sk_rxhash != skb->hash)) { + sock_rps_reset_flow(sk); + sk->sk_rxhash = skb->hash; } - skb->next = NULL; +#endif +} + +static inline void sock_rps_reset_rxhash(struct sock *sk) +{ +#ifdef CONFIG_RPS + sock_rps_reset_flow(sk); + sk->sk_rxhash = 0; +#endif } #define sk_wait_event(__sk, __timeo, __condition) \ @@ -487,79 +891,105 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) __rc; \ }) -extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p); -extern int 
sk_stream_wait_memory(struct sock *sk, long *timeo_p); -extern void sk_stream_wait_close(struct sock *sk, long timeo_p); -extern int sk_stream_error(struct sock *sk, int flags, int err); -extern void sk_stream_kill_queues(struct sock *sk); +int sk_stream_wait_connect(struct sock *sk, long *timeo_p); +int sk_stream_wait_memory(struct sock *sk, long *timeo_p); +void sk_stream_wait_close(struct sock *sk, long timeo_p); +int sk_stream_error(struct sock *sk, int flags, int err); +void sk_stream_kill_queues(struct sock *sk); +void sk_set_memalloc(struct sock *sk); +void sk_clear_memalloc(struct sock *sk); -extern int sk_wait_data(struct sock *sk, long *timeo); +int sk_wait_data(struct sock *sk, long *timeo); struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; +struct raw_hashinfo; +struct module; + +/* + * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes + * un-modified. Special care is taken when initializing object to zero. + */ +static inline void sk_prot_clear_nulls(struct sock *sk, int size) +{ + if (offsetof(struct sock, sk_node.next) != 0) + memset(sk, 0, offsetof(struct sock, sk_node.next)); + memset(&sk->sk_node.pprev, 0, + size - offsetof(struct sock, sk_node.pprev)); +} /* Networking protocol blocks we attach to sockets. * socket layer -> transport layer interface * transport -> network interface is defined by struct inet_proto */ struct proto { - void (*close)(struct sock *sk, + void (*close)(struct sock *sk, long timeout); int (*connect)(struct sock *sk, - struct sockaddr *uaddr, + struct sockaddr *uaddr, int addr_len); int (*disconnect)(struct sock *sk, int flags); - struct sock * (*accept) (struct sock *sk, int flags, int *err); + struct sock * (*accept)(struct sock *sk, int flags, int *err); int (*ioctl)(struct sock *sk, int cmd, unsigned long arg); int (*init)(struct sock *sk); - int (*destroy)(struct sock *sk); + void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); - int (*setsockopt)(struct sock *sk, int level, + int (*setsockopt)(struct sock *sk, int level, int optname, char __user *optval, - int optlen); - int (*getsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - int __user *option); + unsigned int optlen); + int (*getsockopt)(struct sock *sk, int level, + int optname, char __user *optval, + int __user *option); +#ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*compat_getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *option); + int (*compat_ioctl)(struct sock *sk, + unsigned int cmd, unsigned long arg); +#endif int (*sendmsg)(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); int (*recvmsg)(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, - int *addr_len); + size_t len, int noblock, int flags, + int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, int offset, size_t size, int flags); - int (*bind)(struct sock *sk, + int (*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len); - int (*backlog_rcv) (struct sock *sk, + int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); + void (*release_cb)(struct sock *sk); + void (*mtu_reduced)(struct sock *sk); + /* Keeping track of sk's, looking them up, and port selection methods. 
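The backlog helpers above (sk_add_backlog() with a limit, sk_backlog_rcv()) are typically combined in a protocol's receive path roughly as sketched below. The limit choice and the example_* name are assumptions, not taken from this patch.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: deliver directly when the socket is not owned by a process,
 * otherwise park the skb on the backlog, dropping if the limit is hit.
 */
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		rc = sk_backlog_rcv(sk, skb);	/* may divert to __sk_backlog_rcv() for pfmemalloc skbs */
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		kfree_skb(skb);			/* -ENOBUFS: rcvqueue + backlog over the limit */
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}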
*/ void (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); + void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); + void (*clear_sk)(struct sock *sk, int size); /* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS - struct pcounter inuse; + unsigned int inuse_idx; #endif + bool (*stream_memory_free)(const struct sock *sk); /* Memory pressure */ - void (*enter_memory_pressure)(void); - atomic_t *memory_allocated; /* Current allocated memory. */ - atomic_t *sockets_allocated; /* Current number of sockets. */ + void (*enter_memory_pressure)(struct sock *sk); + atomic_long_t *memory_allocated; /* Current allocated memory. */ + struct percpu_counter *sockets_allocated; /* Current number of sockets. */ /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. @@ -567,20 +997,26 @@ struct proto { * is strict, actions are advisory and have some latency. */ int *memory_pressure; - int *sysctl_mem; + long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; + bool no_autobind; - struct kmem_cache *slab; + struct kmem_cache *slab; unsigned int obj_size; + int slab_flags; - atomic_t *orphan_count; + struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; - struct inet_hashinfo *hashinfo; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + } h; struct module *owner; @@ -590,10 +1026,60 @@ struct proto { #ifdef SOCK_REFCNT_DEBUG atomic_t socks; #endif +#ifdef CONFIG_MEMCG_KMEM + /* + * cgroup specific init/deinit functions. Called once for all + * protocols that implement it, from cgroups populate function. + * This function has to setup any files the protocol want to + * appear in the kmem cgroup filesystem. + */ + int (*init_cgroup)(struct mem_cgroup *memcg, + struct cgroup_subsys *ss); + void (*destroy_cgroup)(struct mem_cgroup *memcg); + struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); +#endif +}; + +/* + * Bits in struct cg_proto.flags + */ +enum cg_proto_flags { + /* Currently active and new sockets should be assigned to cgroups */ + MEMCG_SOCK_ACTIVE, + /* It was ever activated; we must disarm static keys on destruction */ + MEMCG_SOCK_ACTIVATED, +}; + +struct cg_proto { + struct res_counter memory_allocated; /* Current allocated memory. */ + struct percpu_counter sockets_allocated; /* Current number of sockets. */ + int memory_pressure; + long sysctl_mem[3]; + unsigned long flags; + /* + * memcg field is used to find which memcg we belong directly + * Each memcg struct can hold more than one cg_proto, so container_of + * won't really cut. + * + * The elegant solution would be having an inverse function to + * proto_cgroup in struct proto, but that means polluting the structure + * for everybody, instead of just for memcg users. 
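For orientation, a hedged sketch of how a protocol fills in and registers a struct proto. Only a few fields are shown; the .name field is assumed from the full structure (it is not in the hunk above), and example_proto/example_init/example_exit are hypothetical.

#include <linux/module.h>
#include <net/sock.h>

/* Sketch: a minimal protocol block. A real protocol would also wire up
 * .close, .sendmsg, .recvmsg, .hash/.unhash, .get_port and friends.
 */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* normally sizeof(struct example_sock) */
};

static int __init example_init(void)
{
	/* second argument: allocate a dedicated kmem_cache of obj_size objects */
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}

module_init(example_init);
module_exit(example_exit);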
+ */ + struct mem_cgroup *memcg; }; -extern int proto_register(struct proto *prot, int alloc_slab); -extern void proto_unregister(struct proto *prot); +int proto_register(struct proto *prot, int alloc_slab); +void proto_unregister(struct proto *prot); + +static inline bool memcg_proto_active(struct cg_proto *cg_proto) +{ + return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags); +} + +static inline bool memcg_proto_activated(struct cg_proto *cg_proto) +{ + return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags); +} #ifdef SOCK_REFCNT_DEBUG static inline void sk_refcnt_debug_inc(struct sock *sk) @@ -620,38 +1106,228 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ +#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET) +extern struct static_key memcg_socket_limit_enabled; +static inline struct cg_proto *parent_cg_proto(struct proto *proto, + struct cg_proto *cg_proto) +{ + return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); +} +#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled) +#else +#define mem_cgroup_sockets_enabled 0 +static inline struct cg_proto *parent_cg_proto(struct proto *proto, + struct cg_proto *cg_proto) +{ + return NULL; +} +#endif -#ifdef CONFIG_PROC_FS -# define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME) -# define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse) -/* Called with local bh disabled */ -static inline void sock_prot_inuse_add(struct proto *prot, int inc) +static inline bool sk_stream_memory_free(const struct sock *sk) +{ + if (sk->sk_wmem_queued >= sk->sk_sndbuf) + return false; + + return sk->sk_prot->stream_memory_free ? + sk->sk_prot->stream_memory_free(sk) : true; +} + +static inline bool sk_stream_is_writeable(const struct sock *sk) +{ + return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && + sk_stream_memory_free(sk); +} + + +static inline bool sk_has_memory_pressure(const struct sock *sk) { - pcounter_add(&prot->inuse, inc); + return sk->sk_prot->memory_pressure != NULL; } -static inline int sock_prot_inuse_init(struct proto *proto) + +static inline bool sk_under_memory_pressure(const struct sock *sk) { - return pcounter_alloc(&proto->inuse); + if (!sk->sk_prot->memory_pressure) + return false; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + return !!sk->sk_cgrp->memory_pressure; + + return !!*sk->sk_prot->memory_pressure; } -static inline int sock_prot_inuse_get(struct proto *proto) + +static inline void sk_leave_memory_pressure(struct sock *sk) { - return pcounter_getval(&proto->inuse); + int *memory_pressure = sk->sk_prot->memory_pressure; + + if (!memory_pressure) + return; + + if (*memory_pressure) + *memory_pressure = 0; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + struct proto *prot = sk->sk_prot; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + cg_proto->memory_pressure = 0; + } + } -static inline void sock_prot_inuse_free(struct proto *proto) + +static inline void sk_enter_memory_pressure(struct sock *sk) { - pcounter_free(&proto->inuse); + if (!sk->sk_prot->enter_memory_pressure) + return; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + struct proto *prot = sk->sk_prot; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + cg_proto->memory_pressure = 1; + } + + sk->sk_prot->enter_memory_pressure(sk); } -#else -# define DEFINE_PROTO_INUSE(NAME) -# 
define REF_PROTO_INUSE(NAME) -static void inline sock_prot_inuse_add(struct proto *prot, int inc) + +static inline long sk_prot_mem_limits(const struct sock *sk, int index) { + long *prot = sk->sk_prot->sysctl_mem; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + prot = sk->sk_cgrp->sysctl_mem; + return prot[index]; } -static int inline sock_prot_inuse_init(struct proto *proto) + +static inline void memcg_memory_allocated_add(struct cg_proto *prot, + unsigned long amt, + int *parent_status) { - return 0; + struct res_counter *fail; + int ret; + + ret = res_counter_charge_nofail(&prot->memory_allocated, + amt << PAGE_SHIFT, &fail); + if (ret < 0) + *parent_status = OVER_LIMIT; } -static void inline sock_prot_inuse_free(struct proto *proto) + +static inline void memcg_memory_allocated_sub(struct cg_proto *prot, + unsigned long amt) +{ + res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT); +} + +static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) +{ + u64 ret; + ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE); + return ret >> PAGE_SHIFT; +} + +static inline long +sk_memory_allocated(const struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + return memcg_memory_allocated_read(sk->sk_cgrp); + + return atomic_long_read(prot->memory_allocated); +} + +static inline long +sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); + /* update the root cgroup regardless */ + atomic_long_add_return(amt, prot->memory_allocated); + return memcg_memory_allocated_read(sk->sk_cgrp); + } + + return atomic_long_add_return(amt, prot->memory_allocated); +} + +static inline void +sk_memory_allocated_sub(struct sock *sk, int amt) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + memcg_memory_allocated_sub(sk->sk_cgrp, amt); + + atomic_long_sub(amt, prot->memory_allocated); +} + +static inline void sk_sockets_allocated_dec(struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + percpu_counter_dec(&cg_proto->sockets_allocated); + } + + percpu_counter_dec(prot->sockets_allocated); +} + +static inline void sk_sockets_allocated_inc(struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + percpu_counter_inc(&cg_proto->sockets_allocated); + } + + percpu_counter_inc(prot->sockets_allocated); +} + +static inline int +sk_sockets_allocated_read_positive(struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated); + + return percpu_counter_read_positive(prot->sockets_allocated); +} + +static inline int +proto_sockets_allocated_sum_positive(struct proto *prot) +{ + return percpu_counter_sum_positive(prot->sockets_allocated); +} + +static inline long +proto_memory_allocated(struct proto *prot) +{ + return atomic_long_read(prot->memory_allocated); +} + +static inline bool +proto_memory_pressure(struct proto *prot) +{ + if (!prot->memory_pressure) + return false; + return 
!!*prot->memory_pressure; +} + + +#ifdef CONFIG_PROC_FS +/* Called with local bh disabled */ +void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); +int sock_prot_inuse_get(struct net *net, struct proto *proto); +#else +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, + int inc) { } #endif @@ -666,6 +1342,8 @@ static inline void __sk_prot_rehash(struct sock *sk) sk->sk_prot->hash(sk); } +void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); + /* About 10 seconds */ #define SOCK_DESTROY_TIME (10*HZ) @@ -722,8 +1400,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket) /* * Functions for memory accounting */ -extern int __sk_mem_schedule(struct sock *sk, int size, int kind); -extern void __sk_mem_reclaim(struct sock *sk); +int __sk_mem_schedule(struct sock *sk, int size, int kind); +void __sk_mem_reclaim(struct sock *sk); #define SK_MEM_QUANTUM ((int)PAGE_SIZE) #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) @@ -735,26 +1413,28 @@ static inline int sk_mem_pages(int amt) return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; } -static inline int sk_has_account(struct sock *sk) +static inline bool sk_has_account(struct sock *sk) { /* return true if protocol supports memory accounting */ return !!sk->sk_prot->memory_allocated; } -static inline int sk_wmem_schedule(struct sock *sk, int size) +static inline bool sk_wmem_schedule(struct sock *sk, int size) { if (!sk_has_account(sk)) - return 1; + return true; return size <= sk->sk_forward_alloc || __sk_mem_schedule(sk, size, SK_MEM_SEND); } -static inline int sk_rmem_schedule(struct sock *sk, int size) +static inline bool +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { if (!sk_has_account(sk)) - return 1; - return size <= sk->sk_forward_alloc || - __sk_mem_schedule(sk, size, SK_MEM_RECV); + return true; + return size<= sk->sk_forward_alloc || + __sk_mem_schedule(sk, size, SK_MEM_RECV) || + skb_pfmemalloc(skb); } static inline void sk_mem_reclaim(struct sock *sk) @@ -789,7 +1469,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { - skb_truesize_check(skb); sock_set_flag(sk, SOCK_QUEUE_SHRUNK); sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); @@ -811,6 +1490,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) */ #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) +static inline void sock_release_ownership(struct sock *sk) +{ + sk->sk_lock.owned = 0; +} + /* * Macro so as to not evaluate some arguments when * lockdep is not enabled. @@ -818,26 +1502,26 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) * Mark both the sk_lock and the sk_lock.slock as a * per-address-family lock class. 
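A sketch of how the accounting helpers are combined on the receive side, loosely modelled on sock_queue_rcv_skb(). The example_* name is hypothetical and the exact checks vary per protocol.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: charge receive memory before queueing an skb to the socket. */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))	/* forward_alloc or __sk_mem_schedule() */
		return -ENOBUFS;

	skb_set_owner_r(skb, sk);	/* adds truesize to sk_rmem_alloc and charges sk memory */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);	/* two-argument form, as used elsewhere in this header */
	return 0;
}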
*/ -#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ +#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ do { \ - sk->sk_lock.owned = 0; \ + sk->sk_lock.owned = 0; \ init_waitqueue_head(&sk->sk_lock.wq); \ spin_lock_init(&(sk)->sk_lock.slock); \ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ sizeof((sk)->sk_lock)); \ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ - (skey), (sname)); \ + (skey), (sname)); \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) -extern void lock_sock_nested(struct sock *sk, int subclass); +void lock_sock_nested(struct sock *sk, int subclass); static inline void lock_sock(struct sock *sk) { lock_sock_nested(sk, 0); } -extern void release_sock(struct sock *sk); +void release_sock(struct sock *sk); /* BH context may only use the following locking interface. */ #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) @@ -846,160 +1530,98 @@ extern void release_sock(struct sock *sk); SINGLE_DEPTH_NESTING) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) -extern struct sock *sk_alloc(struct net *net, int family, - gfp_t priority, - struct proto *prot); -extern void sk_free(struct sock *sk); -extern struct sock *sk_clone(const struct sock *sk, - const gfp_t priority); - -extern struct sk_buff *sock_wmalloc(struct sock *sk, - unsigned long size, int force, - gfp_t priority); -extern struct sk_buff *sock_rmalloc(struct sock *sk, - unsigned long size, int force, - gfp_t priority); -extern void sock_wfree(struct sk_buff *skb); -extern void sock_rfree(struct sk_buff *skb); - -extern int sock_setsockopt(struct socket *sock, int level, - int op, char __user *optval, - int optlen); - -extern int sock_getsockopt(struct socket *sock, int level, - int op, char __user *optval, - int __user *optlen); -extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, - unsigned long size, - int noblock, - int *errcode); -extern void *sock_kmalloc(struct sock *sk, int size, - gfp_t priority); -extern void sock_kfree_s(struct sock *sk, void *mem, int size); -extern void sk_send_sigurg(struct sock *sk); +bool lock_sock_fast(struct sock *sk); +/** + * unlock_sock_fast - complement of lock_sock_fast + * @sk: socket + * @slow: slow mode + * + * fast unlock socket for user context. 
+ * If slow mode is on, we call regular release_sock() + */ +static inline void unlock_sock_fast(struct sock *sk, bool slow) +{ + if (slow) + release_sock(sk); + else + spin_unlock_bh(&sk->sk_lock.slock); +} + + +struct sock *sk_alloc(struct net *net, int family, gfp_t priority, + struct proto *prot); +void sk_free(struct sock *sk); +void sk_release_kernel(struct sock *sk); +struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); + +struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, + gfp_t priority); +void sock_wfree(struct sk_buff *skb); +void skb_orphan_partial(struct sk_buff *skb); +void sock_rfree(struct sk_buff *skb); +void sock_edemux(struct sk_buff *skb); + +int sock_setsockopt(struct socket *sock, int level, int op, + char __user *optval, unsigned int optlen); + +int sock_getsockopt(struct socket *sock, int level, int op, + char __user *optval, int __user *optlen); +struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, + int noblock, int *errcode); +struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, + unsigned long data_len, int noblock, + int *errcode, int max_page_order); +void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); +void sock_kfree_s(struct sock *sk, void *mem, int size); +void sk_send_sigurg(struct sock *sk); /* * Functions to fill in entries in struct proto_ops when a protocol * does not implement a particular function. */ -extern int sock_no_bind(struct socket *, - struct sockaddr *, int); -extern int sock_no_connect(struct socket *, - struct sockaddr *, int, int); -extern int sock_no_socketpair(struct socket *, - struct socket *); -extern int sock_no_accept(struct socket *, - struct socket *, int); -extern int sock_no_getname(struct socket *, - struct sockaddr *, int *, int); -extern unsigned int sock_no_poll(struct file *, struct socket *, - struct poll_table_struct *); -extern int sock_no_ioctl(struct socket *, unsigned int, - unsigned long); -extern int sock_no_listen(struct socket *, int); -extern int sock_no_shutdown(struct socket *, int); -extern int sock_no_getsockopt(struct socket *, int , int, - char __user *, int __user *); -extern int sock_no_setsockopt(struct socket *, int, int, - char __user *, int); -extern int sock_no_sendmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t); -extern int sock_no_recvmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t, int); -extern int sock_no_mmap(struct file *file, - struct socket *sock, - struct vm_area_struct *vma); -extern ssize_t sock_no_sendpage(struct socket *sock, - struct page *page, - int offset, size_t size, - int flags); +int sock_no_bind(struct socket *, struct sockaddr *, int); +int sock_no_connect(struct socket *, struct sockaddr *, int, int); +int sock_no_socketpair(struct socket *, struct socket *); +int sock_no_accept(struct socket *, struct socket *, int); +int sock_no_getname(struct socket *, struct sockaddr *, int *, int); +unsigned int sock_no_poll(struct file *, struct socket *, + struct poll_table_struct *); +int sock_no_ioctl(struct socket *, unsigned int, unsigned long); +int sock_no_listen(struct socket *, int); +int sock_no_shutdown(struct socket *, int); +int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); +int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); +int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t); +int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, 
size_t, + int); +int sock_no_mmap(struct file *file, struct socket *sock, + struct vm_area_struct *vma); +ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags); /* * Functions to fill in entries in struct proto_ops when a protocol * uses the inet style. */ -extern int sock_common_getsockopt(struct socket *sock, int level, int optname, +int sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, +int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags); -extern int sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, int optlen); -extern int compat_sock_common_getsockopt(struct socket *sock, int level, +int sock_common_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen); +int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -extern int compat_sock_common_setsockopt(struct socket *sock, int level, - int optname, char __user *optval, int optlen); +int compat_sock_common_setsockopt(struct socket *sock, int level, + int optname, char __user *optval, unsigned int optlen); -extern void sk_common_release(struct sock *sk); +void sk_common_release(struct sock *sk); /* * Default socket callbacks and setup code */ - -/* Initialise core socket variables */ -extern void sock_init_data(struct socket *sock, struct sock *sk); - -/** - * sk_filter - run a packet through a socket filter - * @sk: sock associated with &sk_buff - * @skb: buffer to filter - * @needlock: set to 1 if the sock is not locked by caller. - * - * Run the filter code and then cut skb->data to correct size returned by - * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller - * than pkt_len we keep whole skb->data. This is the socket level - * wrapper to sk_run_filter. It returns 0 if the packet should - * be accepted or -EPERM if the packet should be tossed. - * - */ - -static inline int sk_filter(struct sock *sk, struct sk_buff *skb) -{ - int err; - struct sk_filter *filter; - - err = security_sock_rcv_skb(sk, skb); - if (err) - return err; - - rcu_read_lock_bh(); - filter = rcu_dereference(sk->sk_filter); - if (filter) { - unsigned int pkt_len = sk_run_filter(skb, filter->insns, - filter->len); - err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; - } - rcu_read_unlock_bh(); - return err; -} - -/** - * sk_filter_release: Release a socket filter - * @sk: socket - * @fp: filter to remove - * - * Remove a filter from a socket and release its resources. - */ - -static inline void sk_filter_release(struct sk_filter *fp) -{ - if (atomic_dec_and_test(&fp->refcnt)) - kfree(fp); -} - -static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) -{ - unsigned int size = sk_filter_len(fp); - - atomic_sub(size, &sk->sk_omem_alloc); - sk_filter_release(fp); -} - -static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) -{ - atomic_inc(&fp->refcnt); - atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc); -} +/* Initialise core socket variables */ +void sock_init_data(struct socket *sock, struct sock *sk); /* * Socket reference counting postulates. 
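The fast-lock pair declared a little earlier (lock_sock_fast()/unlock_sock_fast()) is used as sketched below; example_touch_socket() is a hypothetical caller.

#include <net/sock.h>

/* Sketch: take the cheap spinlock when there is no backlog to process,
 * falling back to the full socket lock (and release_sock()) otherwise.
 */
static void example_touch_socket(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... short critical section: update counters, peek at queues ... */

	unlock_sock_fast(sk, slow);	/* release_sock() only when the slow path was taken */
}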
@@ -1032,10 +1654,39 @@ static inline void sock_put(struct sock *sk) if (atomic_dec_and_test(&sk->sk_refcnt)) sk_free(sk); } +/* Generic version of sock_put(), dealing with all sockets + * (TCP_TIMEWAIT, ESTABLISHED...) + */ +void sock_gen_put(struct sock *sk); + +int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); + +static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) +{ + sk->sk_tx_queue_mapping = tx_queue; +} + +static inline void sk_tx_queue_clear(struct sock *sk) +{ + sk->sk_tx_queue_mapping = -1; +} + +static inline int sk_tx_queue_get(const struct sock *sk) +{ + return sk ? sk->sk_tx_queue_mapping : -1; +} -extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb, - const int nested); +static inline void sk_set_socket(struct sock *sk, struct socket *sock) +{ + sk_tx_queue_clear(sk); + sk->sk_socket = sock; +} +static inline wait_queue_head_t *sk_sleep(struct sock *sk) +{ + BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0); + return &rcu_dereference_raw(sk->sk_wq)->wait; +} /* Detach socket from process context. * Announce socket dead, detach it from wait queue and inode. * Note that parent inode held reference count on this struct sock, @@ -1047,28 +1698,29 @@ static inline void sock_orphan(struct sock *sk) { write_lock_bh(&sk->sk_callback_lock); sock_set_flag(sk, SOCK_DEAD); - sk->sk_socket = NULL; - sk->sk_sleep = NULL; + sk_set_socket(sk, NULL); + sk->sk_wq = NULL; write_unlock_bh(&sk->sk_callback_lock); } static inline void sock_graft(struct sock *sk, struct socket *parent) { write_lock_bh(&sk->sk_callback_lock); - sk->sk_sleep = &parent->wait; + sk->sk_wq = parent->wq; parent->sk = sk; - sk->sk_socket = parent; + sk_set_socket(sk, parent); security_sock_graft(sk, parent); write_unlock_bh(&sk->sk_callback_lock); } -extern int sock_i_uid(struct sock *sk); -extern unsigned long sock_i_ino(struct sock *sk); +kuid_t sock_i_uid(struct sock *sk); +unsigned long sock_i_ino(struct sock *sk); static inline struct dst_entry * __sk_dst_get(struct sock *sk) { - return sk->sk_dst_cache; + return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || + lockdep_is_held(&sk->sk_lock.slock)); } static inline struct dst_entry * @@ -1076,60 +1728,134 @@ sk_dst_get(struct sock *sk) { struct dst_entry *dst; - read_lock(&sk->sk_dst_lock); - dst = sk->sk_dst_cache; - if (dst) - dst_hold(dst); - read_unlock(&sk->sk_dst_lock); + rcu_read_lock(); + dst = rcu_dereference(sk->sk_dst_cache); + if (dst && !atomic_inc_not_zero(&dst->__refcnt)) + dst = NULL; + rcu_read_unlock(); return dst; } +static inline void dst_negative_advice(struct sock *sk) +{ + struct dst_entry *ndst, *dst = __sk_dst_get(sk); + + if (dst && dst->ops->negative_advice) { + ndst = dst->ops->negative_advice(dst); + + if (ndst != dst) { + rcu_assign_pointer(sk->sk_dst_cache, ndst); + sk_tx_queue_clear(sk); + } + } +} + static inline void __sk_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old_dst; - old_dst = sk->sk_dst_cache; - sk->sk_dst_cache = dst; + sk_tx_queue_clear(sk); + /* + * This can be called while sk is owned by the caller only, + * with no state that can be checked in a rcu_dereference_check() cond + */ + old_dst = rcu_dereference_raw(sk->sk_dst_cache); + rcu_assign_pointer(sk->sk_dst_cache, dst); dst_release(old_dst); } static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst) { - write_lock(&sk->sk_dst_lock); - __sk_dst_set(sk, dst); - write_unlock(&sk->sk_dst_lock); + struct dst_entry *old_dst; + + sk_tx_queue_clear(sk); + old_dst = 
xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); + dst_release(old_dst); } static inline void __sk_dst_reset(struct sock *sk) { - struct dst_entry *old_dst; - - old_dst = sk->sk_dst_cache; - sk->sk_dst_cache = NULL; - dst_release(old_dst); + __sk_dst_set(sk, NULL); } static inline void sk_dst_reset(struct sock *sk) { - write_lock(&sk->sk_dst_lock); - __sk_dst_reset(sk); - write_unlock(&sk->sk_dst_lock); + sk_dst_set(sk, NULL); } -extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); +struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); -extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); +struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); -static inline int sk_can_gso(const struct sock *sk) +static inline bool sk_can_gso(const struct sock *sk) { return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); } -extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); +void sk_setup_caps(struct sock *sk, struct dst_entry *dst); + +static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) +{ + sk->sk_route_nocaps |= flags; + sk->sk_route_caps &= ~flags; +} + +static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, + char __user *from, char *to, + int copy, int offset) +{ + if (skb->ip_summed == CHECKSUM_NONE) { + int err = 0; + __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err); + if (err) + return err; + skb->csum = csum_block_add(skb->csum, csum, offset); + } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { + if (!access_ok(VERIFY_READ, from, copy) || + __copy_from_user_nocache(to, from, copy)) + return -EFAULT; + } else if (copy_from_user(to, from, copy)) + return -EFAULT; + + return 0; +} + +static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, + char __user *from, int copy) +{ + int err, offset = skb->len; + + err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), + copy, offset); + if (err) + __skb_trim(skb, offset); + + return err; +} + +static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from, + struct sk_buff *skb, + struct page *page, + int off, int copy) +{ + int err; + + err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, + copy, skb->len); + if (err) + return err; + + skb->len += copy; + skb->data_len += copy; + skb->truesize += copy; + sk->sk_wmem_queued += copy; + sk_mem_charge(sk, copy); + return 0; +} static inline int skb_copy_to_page(struct sock *sk, char __user *from, struct sk_buff *skb, struct page *page, @@ -1154,57 +1880,148 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from, return 0; } +/** + * sk_wmem_alloc_get - returns write allocations + * @sk: socket + * + * Returns sk_wmem_alloc minus initial offset of one + */ +static inline int sk_wmem_alloc_get(const struct sock *sk) +{ + return atomic_read(&sk->sk_wmem_alloc) - 1; +} + +/** + * sk_rmem_alloc_get - returns read allocations + * @sk: socket + * + * Returns sk_rmem_alloc + */ +static inline int sk_rmem_alloc_get(const struct sock *sk) +{ + return atomic_read(&sk->sk_rmem_alloc); +} + +/** + * sk_has_allocations - check if allocations are outstanding + * @sk: socket + * + * Returns true if socket has write or read allocations + */ +static inline bool sk_has_allocations(const struct sock *sk) +{ + return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); +} + +/** + * wq_has_sleeper - check if there are any waiting processes + * @wq: struct socket_wq + * + * Returns true if socket_wq has waiting 
processes + * + * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory + * barrier call. They were added due to the race found within the tcp code. + * + * Consider following tcp code paths: + * + * CPU1 CPU2 + * + * sys_select receive packet + * ... ... + * __add_wait_queue update tp->rcv_nxt + * ... ... + * tp->rcv_nxt check sock_def_readable + * ... { + * schedule rcu_read_lock(); + * wq = rcu_dereference(sk->sk_wq); + * if (wq && waitqueue_active(&wq->wait)) + * wake_up_interruptible(&wq->wait) + * ... + * } + * + * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay + * in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1 + * could then endup calling schedule and sleep forever if there are no more + * data on the socket. + * + */ +static inline bool wq_has_sleeper(struct socket_wq *wq) +{ + /* We need to be sure we are in sync with the + * add_wait_queue modifications to the wait queue. + * + * This memory barrier is paired in the sock_poll_wait. + */ + smp_mb(); + return wq && waitqueue_active(&wq->wait); +} + +/** + * sock_poll_wait - place memory barrier behind the poll_wait call. + * @filp: file + * @wait_address: socket wait queue + * @p: poll_table + * + * See the comments in the wq_has_sleeper function. + */ +static inline void sock_poll_wait(struct file *filp, + wait_queue_head_t *wait_address, poll_table *p) +{ + if (!poll_does_not_wait(p) && wait_address) { + poll_wait(filp, wait_address, p); + /* We need to be sure we are in sync with the + * socket flags modification. + * + * This memory barrier is paired in the wq_has_sleeper. + */ + smp_mb(); + } +} + /* - * Queue a received datagram if it will fit. Stream and sequenced + * Queue a received datagram if it will fit. Stream and sequenced * protocols can't normally use this as they need to fit buffers in * and play with them. * - * Inlined as it's very short and called for pretty much every + * Inlined as it's very short and called for pretty much every * packet ever received. */ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) { - sock_hold(sk); + skb_orphan(skb); skb->sk = sk; skb->destructor = sock_wfree; + /* + * We used to take a refcount on sk, but following operation + * is enough to guarantee sk_free() wont free this sock until + * all in-flight packets are completed + */ atomic_add(skb->truesize, &sk->sk_wmem_alloc); } static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { + skb_orphan(skb); skb->sk = sk; skb->destructor = sock_rfree; atomic_add(skb->truesize, &sk->sk_rmem_alloc); sk_mem_charge(sk, skb->truesize); } -extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, - unsigned long expires); +void sk_reset_timer(struct sock *sk, struct timer_list *timer, + unsigned long expires); -extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); +void sk_stop_timer(struct sock *sk, struct timer_list *timer); -extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); -static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) -{ - /* Cast skb->rcvbuf to unsigned... 
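A sketch of the wakeup side of the pairing described in the comments above: the writer tests for sleepers after changing socket state, while the poll side places its barrier in sock_poll_wait() after registering on the wait queue. The callback name and the POLLIN mask are illustrative assumptions.

#include <linux/poll.h>
#include <net/sock.h>

/* Sketch: a sk_data_ready-style callback using wq_has_sleeper(). */
static void example_data_ready(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))		/* the smp_mb() here pairs with sock_poll_wait() */
		wake_up_interruptible_poll(&wq->wait, POLLIN | POLLRDNORM);
	rcu_read_unlock();
}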
It's pointless, but reduces - number of warnings when compiling with -W --ANK - */ - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) - return -ENOMEM; - skb_set_owner_r(skb, sk); - skb_queue_tail(&sk->sk_error_queue, skb); - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk, skb->len); - return 0; -} +int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); /* * Recover an error report and clear atomically */ - + static inline int sock_error(struct sock *sk) { int err; @@ -1220,7 +2037,7 @@ static inline unsigned long sock_wspace(struct sock *sk) if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); - if (amt < 0) + if (amt < 0) amt = 0; } return amt; @@ -1228,54 +2045,66 @@ static inline unsigned long sock_wspace(struct sock *sk) static inline void sk_wake_async(struct sock *sk, int how, int band) { - if (sk->sk_socket && sk->sk_socket->fasync_list) + if (sock_flag(sk, SOCK_FASYNC)) sock_wake_async(sk->sk_socket, how, band); } -#define SOCK_MIN_SNDBUF 2048 -#define SOCK_MIN_RCVBUF 256 +/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might + * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak. + * Note: for send buffers, TCP works better if we can build two skbs at + * minimum. + */ +#define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff))) + +#define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2) +#define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE static inline void sk_stream_moderate_sndbuf(struct sock *sk) { if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); - sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); + sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); } } struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); -static inline struct page *sk_stream_alloc_page(struct sock *sk) +/** + * sk_page_frag - return an appropriate page_frag + * @sk: socket + * + * If socket allocation mode allows current thread to sleep, it means its + * safe to use the per task page_frag instead of the per socket one. + */ +static inline struct page_frag *sk_page_frag(struct sock *sk) { - struct page *page = NULL; + if (sk->sk_allocation & __GFP_WAIT) + return &current->task_frag; - page = alloc_pages(sk->sk_allocation, 0); - if (!page) { - sk->sk_prot->enter_memory_pressure(); - sk_stream_moderate_sndbuf(sk); - } - return page; + return &sk->sk_frag; } +bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); + /* * Default write policy as shown to user space via poll/select/SIGIO */ -static inline int sock_writeable(const struct sock *sk) +static inline bool sock_writeable(const struct sock *sk) { return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); } static inline gfp_t gfp_any(void) { - return in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } -static inline long sock_rcvtimeo(const struct sock *sk, int noblock) +static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_rcvtimeo; } -static inline long sock_sndtimeo(const struct sock *sk, int noblock) +static inline long sock_sndtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_sndtimeo; } @@ -1293,20 +2122,69 @@ static inline int sock_intr_errno(long timeo) return timeo == MAX_SCHEDULE_TIMEOUT ? 
-ERESTARTSYS : -EINTR; } -extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, - struct sk_buff *skb); +void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); +void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); -static __inline__ void +static inline void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { ktime_t kt = skb->tstamp; + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); - if (sock_flag(sk, SOCK_RCVTSTAMP)) + /* + * generate control messages if + * - receive time stamping in software requested (SOCK_RCVTSTAMP + * or SOCK_TIMESTAMPING_RX_SOFTWARE) + * - software time stamp available and wanted + * (SOCK_TIMESTAMPING_SOFTWARE) + * - hardware time stamps available and wanted + * (SOCK_TIMESTAMPING_SYS_HARDWARE or + * SOCK_TIMESTAMPING_RAW_HARDWARE) + */ + if (sock_flag(sk, SOCK_RCVTSTAMP) || + sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) || + (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) || + (hwtstamps->hwtstamp.tv64 && + sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) || + (hwtstamps->syststamp.tv64 && + sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))) __sock_recv_timestamp(msg, sk, skb); else sk->sk_stamp = kt; + + if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) + __sock_recv_wifi_status(msg, sk, skb); } +void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); + +static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ +#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ + (1UL << SOCK_RCVTSTAMP) | \ + (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ + (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ + (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) + + if (sk->sk_flags & FLAGS_TS_OR_DROPS) + __sock_recv_ts_and_drops(msg, sk, skb); + else + sk->sk_stamp = skb->tstamp; +} + +/** + * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped + * @sk: socket sending this packet + * @tx_flags: filled with instructions for time stamping + * + * Currently only depends on SOCK_TIMESTAMPING* flags. + */ +void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); + /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from @@ -1317,7 +2195,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) * locked so that the sk_buff queue operation is ok. 
*/ #ifdef CONFIG_NET_DMA -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); if (!copied_early) @@ -1326,19 +2204,66 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e __skb_queue_tail(&sk->sk_async_wait_queue, skb); } #else -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } #endif -extern void sock_enable_timestamp(struct sock *sk); -extern int sock_get_timestamp(struct sock *, struct timeval __user *); -extern int sock_get_timestampns(struct sock *, struct timespec __user *); +static inline +struct net *sock_net(const struct sock *sk) +{ + return read_pnet(&sk->sk_net); +} + +static inline +void sock_net_set(struct sock *sk, struct net *net) +{ + write_pnet(&sk->sk_net, net); +} -/* - * Enable debug/info messages +/* + * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace. + * They should not hold a reference to a namespace in order to allow + * to stop it. + * Sockets after sk_change_net should be released using sk_release_kernel + */ +static inline void sk_change_net(struct sock *sk, struct net *net) +{ + struct net *current_net = sock_net(sk); + + if (!net_eq(current_net, net)) { + put_net(current_net); + sock_net_set(sk, hold_net(net)); + } +} + +static inline struct sock *skb_steal_sock(struct sk_buff *skb) +{ + if (skb->sk) { + struct sock *sk = skb->sk; + + skb->destructor = NULL; + skb->sk = NULL; + return sk; + } + return NULL; +} + +void sock_enable_timestamp(struct sock *sk, int flag); +int sock_get_timestamp(struct sock *, struct timeval __user *); +int sock_get_timestampns(struct sock *, struct timespec __user *); +int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, + int type); + +bool sk_ns_capable(const struct sock *sk, + struct user_namespace *user_ns, int cap); +bool sk_capable(const struct sock *sk, int cap); +bool sk_net_capable(const struct sock *sk, int cap); + +/* + * Enable debug/info messages */ extern int net_msg_warn; #define NETDEBUG(fmt, args...) \ @@ -1347,35 +2272,9 @@ extern int net_msg_warn; #define LIMIT_NETDEBUG(fmt, args...) \ do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0) -/* - * Macros for sleeping on a socket. Use them like this: - * - * SOCK_SLEEP_PRE(sk) - * if (condition) - * schedule(); - * SOCK_SLEEP_POST(sk) - * - * N.B. These are now obsolete and were, afaik, only ever used in DECnet - * and when the last use of them in DECnet has gone, I'm intending to - * remove them. 
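skb_steal_sock() above pairs with early demux: if a previous layer already attached a socket to the skb, the hash lookup can be skipped. A hedged sketch; the fallback lookup and the example_* name are hypothetical.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: prefer the socket reference attached by early demux. */
static struct sock *example_lookup(struct sk_buff *skb)
{
	struct sock *sk = skb_steal_sock(skb);	/* takes over the reference, clears skb->sk */

	if (sk)
		return sk;
	/* otherwise fall back to a normal hash-table lookup, matching
	 * candidates against the expected namespace via sock_net()
	 */
	return NULL;
}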
- */ - -#define SOCK_SLEEP_PRE(sk) { struct task_struct *tsk = current; \ - DECLARE_WAITQUEUE(wait, tsk); \ - tsk->state = TASK_INTERRUPTIBLE; \ - add_wait_queue((sk)->sk_sleep, &wait); \ - release_sock(sk); - -#define SOCK_SLEEP_POST(sk) tsk->state = TASK_RUNNING; \ - remove_wait_queue((sk)->sk_sleep, &wait); \ - lock_sock(sk); \ - } - extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; -extern void sk_init(void); - extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; diff --git a/include/net/stp.h b/include/net/stp.h new file mode 100644 index 00000000000..3af174d70d9 --- /dev/null +++ b/include/net/stp.h @@ -0,0 +1,14 @@ +#ifndef _NET_STP_H +#define _NET_STP_H + +struct stp_proto { + unsigned char group_address[ETH_ALEN]; + void (*rcv)(const struct stp_proto *, struct sk_buff *, + struct net_device *); + void *data; +}; + +int stp_proto_register(const struct stp_proto *proto); +void stp_proto_unregister(const struct stp_proto *proto); + +#endif /* _NET_STP_H */ diff --git a/include/net/syncppp.h b/include/net/syncppp.h deleted file mode 100644 index 877efa43470..00000000000 --- a/include/net/syncppp.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Defines for synchronous PPP/Cisco link level subroutines. - * - * Copyright (C) 1994 Cronyx Ltd. - * Author: Serge Vakulenko, <vak@zebub.msk.su> - * - * This software is distributed with NO WARRANTIES, not even the implied - * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * Authors grant any other persons or organizations permission to use - * or modify this software as long as this message is kept with the software, - * all derivative works or modified versions. - * - * Version 1.7, Wed Jun 7 22:12:02 MSD 1995 - * - * - * - */ - -#ifndef _SYNCPPP_H_ -#define _SYNCPPP_H_ 1 - -#ifdef __KERNEL__ -struct slcp { - u16 state; /* state machine */ - u32 magic; /* local magic number */ - u_char echoid; /* id of last keepalive echo request */ - u_char confid; /* id of last configuration request */ -}; - -struct sipcp { - u16 state; /* state machine */ - u_char confid; /* id of last configuration request */ -}; - -struct sppp -{ - struct sppp * pp_next; /* next interface in keepalive list */ - u32 pp_flags; /* use Cisco protocol instead of PPP */ - u16 pp_alivecnt; /* keepalive packets counter */ - u16 pp_loopcnt; /* loopback detection counter */ - u32 pp_seq; /* local sequence number */ - u32 pp_rseq; /* remote sequence number */ - struct slcp lcp; /* LCP params */ - struct sipcp ipcp; /* IPCP params */ - u32 ibytes,obytes; /* Bytes in/out */ - u32 ipkts,opkts; /* Packets in/out */ - struct timer_list pp_timer; - struct net_device *pp_if; - char pp_link_state; /* Link status */ - spinlock_t lock; -}; - -struct ppp_device -{ - struct net_device *dev; /* Network device pointer */ - struct sppp sppp; /* Synchronous PPP */ -}; - -static inline struct sppp *sppp_of(struct net_device *dev) -{ - struct ppp_device **ppp = dev->priv; - BUG_ON((*ppp)->dev != dev); - return &(*ppp)->sppp; -} - -#define PP_KEEPALIVE 0x01 /* use keepalive protocol */ -#define PP_CISCO 0x02 /* use Cisco protocol instead of PPP */ -#define PP_TIMO 0x04 /* cp_timeout routine active */ -#define PP_DEBUG 0x08 - -#define PPP_MTU 1500 /* max. 
transmit unit */ - -#define LCP_STATE_CLOSED 0 /* LCP state: closed (conf-req sent) */ -#define LCP_STATE_ACK_RCVD 1 /* LCP state: conf-ack received */ -#define LCP_STATE_ACK_SENT 2 /* LCP state: conf-ack sent */ -#define LCP_STATE_OPENED 3 /* LCP state: opened */ - -#define IPCP_STATE_CLOSED 0 /* IPCP state: closed (conf-req sent) */ -#define IPCP_STATE_ACK_RCVD 1 /* IPCP state: conf-ack received */ -#define IPCP_STATE_ACK_SENT 2 /* IPCP state: conf-ack sent */ -#define IPCP_STATE_OPENED 3 /* IPCP state: opened */ - -#define SPPP_LINK_DOWN 0 /* link down - no keepalive */ -#define SPPP_LINK_UP 1 /* link is up - keepalive ok */ - -void sppp_attach (struct ppp_device *pd); -void sppp_detach (struct net_device *dev); -int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); -struct sk_buff *sppp_dequeue (struct net_device *dev); -int sppp_isempty (struct net_device *dev); -void sppp_flush (struct net_device *dev); -int sppp_open (struct net_device *dev); -int sppp_reopen (struct net_device *dev); -int sppp_close (struct net_device *dev); -#endif - -#define SPPPIOCCISCO (SIOCDEVPRIVATE) -#define SPPPIOCPPP (SIOCDEVPRIVATE+1) -#define SPPPIOCDEBUG (SIOCDEVPRIVATE+2) -#define SPPPIOCSFLAGS (SIOCDEVPRIVATE+3) -#define SPPPIOCGFLAGS (SIOCDEVPRIVATE+4) - -#endif /* _SYNCPPP_H_ */ diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h new file mode 100644 index 00000000000..fa8f5fac65e --- /dev/null +++ b/include/net/tc_act/tc_csum.h @@ -0,0 +1,15 @@ +#ifndef __NET_TC_CSUM_H +#define __NET_TC_CSUM_H + +#include <linux/types.h> +#include <net/act_api.h> + +struct tcf_csum { + struct tcf_common common; + + u32 update_flags; +}; +#define to_tcf_csum(a) \ + container_of(a->priv,struct tcf_csum,common) + +#endif /* __NET_TC_CSUM_H */ diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h index 65f024b8095..9763dcbb9bc 100644 --- a/include/net/tc_act/tc_defact.h +++ b/include/net/tc_act/tc_defact.h @@ -8,7 +8,7 @@ struct tcf_defact { u32 tcfd_datalen; void *tcfd_defdata; }; -#define to_defact(pc) \ - container_of(pc, struct tcf_defact, common) +#define to_defact(a) \ + container_of(a->priv, struct tcf_defact, common) #endif /* __NET_TC_DEF_H */ diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index 9e3f6767b80..9fc9b578908 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -11,7 +11,7 @@ struct tcf_gact { int tcfg_paction; #endif }; -#define to_gact(pc) \ - container_of(pc, struct tcf_gact, common) +#define to_gact(a) \ + container_of(a->priv, struct tcf_gact, common) #endif /* __NET_TC_GACT_H */ diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h index f7d25dfcc4b..c0f4193f432 100644 --- a/include/net/tc_act/tc_ipt.h +++ b/include/net/tc_act/tc_ipt.h @@ -11,7 +11,7 @@ struct tcf_ipt { char *tcfi_tname; struct xt_entry_target *tcfi_t; }; -#define to_ipt(pc) \ - container_of(pc, struct tcf_ipt, common) +#define to_ipt(a) \ + container_of(a->priv, struct tcf_ipt, common) #endif /* __NET_TC_IPT_H */ diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index ceac661cdfd..4dd77a1c106 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -9,8 +9,9 @@ struct tcf_mirred { int tcfm_ifindex; int tcfm_ok_push; struct net_device *tcfm_dev; + struct list_head tcfm_list; }; -#define to_mirred(pc) \ - container_of(pc, struct tcf_mirred, common) +#define to_mirred(a) \ + container_of(a->priv, struct tcf_mirred, common) #endif /* 
__NET_TC_MIR_H */ diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h index 4a691f34d70..63d8e9ca9d9 100644 --- a/include/net/tc_act/tc_nat.h +++ b/include/net/tc_act/tc_nat.h @@ -13,9 +13,9 @@ struct tcf_nat { u32 flags; }; -static inline struct tcf_nat *to_tcf_nat(struct tcf_common *pc) +static inline struct tcf_nat *to_tcf_nat(struct tc_action *a) { - return container_of(pc, struct tcf_nat, common); + return container_of(a->priv, struct tcf_nat, common); } #endif /* __NET_TC_NAT_H */ diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index e6f6e15956f..5b80998879c 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -9,7 +9,7 @@ struct tcf_pedit { unsigned char tcfp_flags; struct tc_pedit_key *tcfp_keys; }; -#define to_pedit(pc) \ - container_of(pc, struct tcf_pedit, common) +#define to_pedit(a) \ + container_of(a->priv, struct tcf_pedit, common) #endif /* __NET_TC_PED_H */ diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h new file mode 100644 index 00000000000..0df9a0db4a8 --- /dev/null +++ b/include/net/tc_act/tc_skbedit.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2008, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * Author: Alexander Duyck <alexander.h.duyck@intel.com> + */ + +#ifndef __NET_TC_SKBEDIT_H +#define __NET_TC_SKBEDIT_H + +#include <net/act_api.h> + +struct tcf_skbedit { + struct tcf_common common; + u32 flags; + u32 priority; + u32 mark; + u16 queue_mapping; + /* XXX: 16-bit pad here? */ +}; +#define to_skbedit(a) \ + container_of(a->priv, struct tcf_skbedit, common) + +#endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 7de4ea3a04d..7286db80e8b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -18,17 +18,20 @@ #ifndef _TCP_H #define _TCP_H -#define TCP_DEBUG 1 #define FASTRETRANS_DEBUG 1 #include <linux/list.h> #include <linux/tcp.h> +#include <linux/bug.h> #include <linux/slab.h> #include <linux/cache.h> #include <linux/percpu.h> #include <linux/skbuff.h> #include <linux/dmaengine.h> #include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/kref.h> +#include <linux/ktime.h> #include <net/inet_connection_sock.h> #include <net/inet_timewait_sock.h> @@ -40,15 +43,18 @@ #include <net/ip.h> #include <net/tcp_states.h> #include <net/inet_ecn.h> +#include <net/dst.h> #include <linux/seq_file.h> +#include <linux/memcontrol.h> extern struct inet_hashinfo tcp_hashinfo; -extern atomic_t tcp_orphan_count; -extern void tcp_time_wait(struct sock *sk, int state, int timeo); +extern struct percpu_counter tcp_orphan_count; +void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) +#define MAX_TCP_OPTION_SPACE 40 /* * Never offer a window over 32767 without using window scaling. Some @@ -59,9 +65,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); /* Minimal accepted MSS. 
It is (60+60+8) - (20+20). */ #define TCP_MIN_MSS 88U -/* Minimal RCV_MSS. */ -#define TCP_MIN_RCVMSS 536U - /* The least MTU to use for probing */ #define TCP_BASE_MSS 512 @@ -93,17 +96,21 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); * 15 is ~13-30min depending on RTO. */ -#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a - * connection: ~180sec is RFC minimum */ - -#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a - * connection: ~180sec is RFC minimum */ - - -#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned - * socket. 7 is ~50sec-16min. +#define TCP_SYN_RETRIES 6 /* This is how many retries are done + * when active opening a connection. + * RFC1122 says the minimum retry MUST + * be at least 180secs. Nevertheless + * this value is corresponding to + * 63secs of retransmission with the + * current initial RTO. */ +#define TCP_SYNACK_RETRIES 5 /* This is how may retries are done + * when passive opening a connection. + * This is corresponding to 31secs of + * retransmission with the current + * initial RTO. + */ #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT * state, about 60 seconds */ @@ -124,7 +131,13 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #endif #define TCP_RTO_MAX ((unsigned)(120*HZ)) #define TCP_RTO_MIN ((unsigned)(HZ/5)) -#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */ +#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ +#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now + * used as a fallback RTO for the + * initial data transmission if no + * valid RTT sample has been acquired, + * most likely due to retrans in 3WHS. + */ #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes * for local resources. @@ -164,6 +177,11 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ +#define TCPOPT_EXP 254 /* Experimental */ +/* Magic number to be after the option value for sharing TCP + * experimental options. See draft-ietf-tcpm-experimental-options-00.txt + */ +#define TCPOPT_FASTOPEN_MAGIC 0xF989 /* * TCP option lengths @@ -174,6 +192,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOLEN_SACK_PERM 2 #define TCPOLEN_TIMESTAMP 10 #define TCPOLEN_MD5SIG 18 +#define TCPOLEN_EXP_FASTOPEN_BASE 4 /* But this is what stacks really send out. */ #define TCPOLEN_TSTAMP_ALIGNED 12 @@ -183,12 +202,33 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOLEN_SACK_BASE_ALIGNED 4 #define TCPOLEN_SACK_PERBLOCK 8 #define TCPOLEN_MD5SIG_ALIGNED 20 +#define TCPOLEN_MSS_ALIGNED 4 /* Flags in tp->nonagle */ #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ #define TCP_NAGLE_CORK 2 /* Socket is corked */ #define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */ +/* TCP thin-stream limits */ +#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. 
backoff */ + +/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */ +#define TCP_INIT_CWND 10 + +/* Bit Flags for sysctl_tcp_fastopen */ +#define TFO_CLIENT_ENABLE 1 +#define TFO_SERVER_ENABLE 2 +#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */ + +/* Accept SYN data w/o any cookie option */ +#define TFO_SERVER_COOKIE_NOT_REQD 0x200 + +/* Force enable TFO on all listeners, i.e., not requiring the + * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen. + */ +#define TFO_SERVER_WO_SOCKOPT1 0x400 +#define TFO_SERVER_WO_SOCKOPT2 0x800 + extern struct inet_timewait_death_row tcp_death_row; /* sysctl variables for tcp */ @@ -205,6 +245,7 @@ extern int sysctl_tcp_retries1; extern int sysctl_tcp_retries2; extern int sysctl_tcp_orphan_retries; extern int sysctl_tcp_syncookies; +extern int sysctl_tcp_fastopen; extern int sysctl_tcp_retrans_collapse; extern int sysctl_tcp_stdurg; extern int sysctl_tcp_rfc1337; @@ -212,30 +253,34 @@ extern int sysctl_tcp_abort_on_overflow; extern int sysctl_tcp_max_orphans; extern int sysctl_tcp_fack; extern int sysctl_tcp_reordering; -extern int sysctl_tcp_ecn; extern int sysctl_tcp_dsack; -extern int sysctl_tcp_mem[3]; +extern long sysctl_tcp_mem[3]; extern int sysctl_tcp_wmem[3]; extern int sysctl_tcp_rmem[3]; extern int sysctl_tcp_app_win; extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; -extern int sysctl_tcp_frto_response; extern int sysctl_tcp_low_latency; extern int sysctl_tcp_dma_copybreak; extern int sysctl_tcp_nometrics_save; extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_tso_win_divisor; -extern int sysctl_tcp_abc; extern int sysctl_tcp_mtu_probing; extern int sysctl_tcp_base_mss; extern int sysctl_tcp_workaround_signed_windows; extern int sysctl_tcp_slow_start_after_idle; -extern int sysctl_tcp_max_ssthresh; - -extern atomic_t tcp_memory_allocated; -extern atomic_t tcp_sockets_allocated; +extern int sysctl_tcp_thin_linear_timeouts; +extern int sysctl_tcp_thin_dupack; +extern int sysctl_tcp_early_retrans; +extern int sysctl_tcp_limit_output_bytes; +extern int sysctl_tcp_challenge_ack_limit; +extern unsigned int sysctl_tcp_notsent_lowat; +extern int sysctl_tcp_min_tso_segs; +extern int sysctl_tcp_autocorking; + +extern atomic_long_t tcp_memory_allocated; +extern struct percpu_counter tcp_sockets_allocated; extern int tcp_memory_pressure; /* @@ -243,74 +288,92 @@ extern int tcp_memory_pressure; * and worry about wraparound (automatic with unsigned arithmetic). */ -static inline int before(__u32 seq1, __u32 seq2) +static inline bool before(__u32 seq1, __u32 seq2) { return (__s32)(seq1-seq2) < 0; } #define after(seq2, seq1) before(seq1, seq2) /* is s2<=s1<=s3 ? 
*/ -static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) +static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3) { return seq3 - seq2 >= seq1 - seq2; } -static inline int tcp_too_many_orphans(struct sock *sk, int num) +static inline bool tcp_out_of_memory(struct sock *sk) { - return (num > sysctl_tcp_max_orphans) || - (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && - atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]); + if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && + sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) + return true; + return false; } -extern struct proto tcp_prot; - -DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics); -#define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field) -#define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field) -#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field) -#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field) -#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val) -#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val) - -extern void tcp_v4_err(struct sk_buff *skb, u32); - -extern void tcp_shutdown (struct sock *sk, int how); - -extern int tcp_v4_rcv(struct sk_buff *skb); - -extern int tcp_v4_remember_stamp(struct sock *sk); - -extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); - -extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size); -extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); - -extern int tcp_ioctl(struct sock *sk, - int cmd, - unsigned long arg); - -extern int tcp_rcv_state_process(struct sock *sk, - struct sk_buff *skb, - struct tcphdr *th, - unsigned len); +static inline bool tcp_too_many_orphans(struct sock *sk, int shift) +{ + struct percpu_counter *ocp = sk->sk_prot->orphan_count; + int orphans = percpu_counter_read_positive(ocp); -extern int tcp_rcv_established(struct sock *sk, - struct sk_buff *skb, - struct tcphdr *th, - unsigned len); + if (orphans << shift > sysctl_tcp_max_orphans) { + orphans = percpu_counter_sum_positive(ocp); + if (orphans << shift > sysctl_tcp_max_orphans) + return true; + } + return false; +} -extern void tcp_rcv_space_adjust(struct sock *sk); +bool tcp_check_oom(struct sock *sk, int shift); -extern void tcp_cleanup_rbuf(struct sock *sk, int copied); +/* syncookies: remember time of last synqueue overflow */ +static inline void tcp_synq_overflow(struct sock *sk) +{ + tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies; +} -extern int tcp_twsk_unique(struct sock *sk, - struct sock *sktw, void *twp); +/* syncookies: no recent synqueue overflow on this listening socket? 
*/ +static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) +{ + unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK); +} -extern void tcp_twsk_destructor(struct sock *sk); +extern struct proto tcp_prot; -extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, unsigned int flags); +#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field) +#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field) +#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) +#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val) +#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) + +void tcp_tasklet_init(void); + +void tcp_v4_err(struct sk_buff *skb, u32); + +void tcp_shutdown(struct sock *sk, int how); + +void tcp_v4_early_demux(struct sk_buff *skb); +int tcp_v4_rcv(struct sk_buff *skb); + +int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); +int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t size); +int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, + int flags); +void tcp_release_cb(struct sock *sk); +void tcp_wfree(struct sk_buff *skb); +void tcp_write_timer_handler(struct sock *sk); +void tcp_delack_timer_handler(struct sock *sk); +int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, + const struct tcphdr *th, unsigned int len); +void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, + const struct tcphdr *th, unsigned int len); +void tcp_rcv_space_adjust(struct sock *sk); +void tcp_cleanup_rbuf(struct sock *sk, int copied); +int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); +void tcp_twsk_destructor(struct sock *sk); +ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) @@ -327,26 +390,12 @@ static inline void tcp_dec_quickack_mode(struct sock *sk, } } -extern void tcp_enter_quickack_mode(struct sock *sk); - -static inline void tcp_clear_options(struct tcp_options_received *rx_opt) -{ - rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0; -} - #define TCP_ECN_OK 1 #define TCP_ECN_QUEUE_CWR 2 #define TCP_ECN_DEMAND_CWR 4 +#define TCP_ECN_SEEN 8 -static __inline__ void -TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th) -{ - if (sysctl_tcp_ecn && th->ece && th->cwr) - inet_rsk(req)->ecn_ok = 1; -} - -enum tcp_tw_status -{ +enum tcp_tw_status { TCP_TW_SUCCESS = 0, TCP_TW_RST = 1, TCP_TW_ACK = 2, @@ -354,139 +403,218 @@ enum tcp_tw_status }; -extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, - struct sk_buff *skb, - const struct tcphdr *th); - -extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, - struct request_sock *req, - struct request_sock **prev); -extern int tcp_child_process(struct sock *parent, - struct sock *child, - struct sk_buff *skb); -extern int tcp_use_frto(struct sock *sk); -extern void tcp_enter_frto(struct sock *sk); -extern void tcp_enter_loss(struct sock *sk, int how); -extern void tcp_clear_retrans(struct tcp_sock *tp); -extern void tcp_update_metrics(struct sock 
*sk); - -extern void tcp_close(struct sock *sk, - long timeout); -extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); - -extern int tcp_getsockopt(struct sock *sk, int level, - int optname, - char __user *optval, - int __user *optlen); -extern int tcp_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, - int optlen); -extern int compat_tcp_getsockopt(struct sock *sk, - int level, int optname, - char __user *optval, int __user *optlen); -extern int compat_tcp_setsockopt(struct sock *sk, - int level, int optname, - char __user *optval, int optlen); -extern void tcp_set_keepalive(struct sock *sk, int val); -extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, - size_t len, int nonblock, - int flags, int *addr_len); - -extern void tcp_parse_options(struct sk_buff *skb, - struct tcp_options_received *opt_rx, - int estab); +enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, + struct sk_buff *skb, + const struct tcphdr *th); +struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, struct request_sock **prev, + bool fastopen); +int tcp_child_process(struct sock *parent, struct sock *child, + struct sk_buff *skb); +void tcp_enter_loss(struct sock *sk, int how); +void tcp_clear_retrans(struct tcp_sock *tp); +void tcp_update_metrics(struct sock *sk); +void tcp_init_metrics(struct sock *sk); +void tcp_metrics_init(void); +bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, + bool paws_check); +bool tcp_remember_stamp(struct sock *sk); +bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw); +void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); +void tcp_disable_fack(struct tcp_sock *tp); +void tcp_close(struct sock *sk, long timeout); +void tcp_init_sock(struct sock *sk); +unsigned int tcp_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); +int tcp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +int tcp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +int compat_tcp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +int compat_tcp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +void tcp_set_keepalive(struct sock *sk, int val); +void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); +int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len, int nonblock, int flags, int *addr_len); +void tcp_parse_options(const struct sk_buff *skb, + struct tcp_options_received *opt_rx, + int estab, struct tcp_fastopen_cookie *foc); +const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); /* * TCP v4 functions exported for the inet6 API */ -extern void tcp_v4_send_check(struct sock *sk, int len, - struct sk_buff *skb); - -extern int tcp_v4_conn_request(struct sock *sk, - struct sk_buff *skb); +void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); +int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); +struct sock *tcp_create_openreq_child(struct sock *sk, + struct request_sock *req, + struct sk_buff *skb); +struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst); +int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); +int tcp_v4_connect(struct sock *sk, struct 
sockaddr *uaddr, int addr_len); +int tcp_connect(struct sock *sk); +struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, + struct request_sock *req, + struct tcp_fastopen_cookie *foc); +int tcp_disconnect(struct sock *sk, int flags); + +void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); +int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); -extern struct sock * tcp_create_openreq_child(struct sock *sk, - struct request_sock *req, - struct sk_buff *skb); - -extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, - struct sk_buff *skb, - struct request_sock *req, - struct dst_entry *dst); - -extern int tcp_v4_do_rcv(struct sock *sk, - struct sk_buff *skb); - -extern int tcp_v4_connect(struct sock *sk, - struct sockaddr *uaddr, - int addr_len); - -extern int tcp_connect(struct sock *sk); - -extern struct sk_buff * tcp_make_synack(struct sock *sk, - struct dst_entry *dst, - struct request_sock *req); +/* From syncookies.c */ +int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, + u32 cookie); +struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, + struct ip_options *opt); +#ifdef CONFIG_SYN_COOKIES + +/* Syncookies use a monotonic timer which increments every 60 seconds. + * This counter is used both as a hash input and partially encoded into + * the cookie value. A cookie is only validated further if the delta + * between the current counter value and the encoded one is less than this, + * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if + * the counter advances immediately after a cookie is generated). + */ +#define MAX_SYNCOOKIE_AGE 2 -extern int tcp_disconnect(struct sock *sk, int flags); +static inline u32 tcp_cookie_time(void) +{ + u64 val = get_jiffies_64(); -extern void tcp_unhash(struct sock *sk); + do_div(val, 60 * HZ); + return val; +} -/* From syncookies.c */ -extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, - struct ip_options *opt); -extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, - __u16 *mss); +u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, + u16 *mssp); +__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss); +#else +static inline __u32 cookie_v4_init_sequence(struct sock *sk, + struct sk_buff *skb, + __u16 *mss) +{ + return 0; +} +#endif +__u32 cookie_init_timestamp(struct request_sock *req); +bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net, + bool *ecn_ok); + +/* From net/ipv6/syncookies.c */ +int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, + u32 cookie); +struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); +#ifdef CONFIG_SYN_COOKIES +u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, + const struct tcphdr *th, u16 *mssp); +__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, + __u16 *mss); +#else +static inline __u32 cookie_v6_init_sequence(struct sock *sk, + struct sk_buff *skb, + __u16 *mss) +{ + return 0; +} +#endif /* tcp_output.c */ -extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, - int nonagle); -extern int tcp_may_send_now(struct sock *sk); -extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); -extern void tcp_xmit_retransmit_queue(struct sock *); -extern void tcp_simple_retransmit(struct sock *); -extern int tcp_trim_head(struct sock *, struct sk_buff 
*, u32); -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); - -extern void tcp_send_probe0(struct sock *); -extern void tcp_send_partial(struct sock *); -extern int tcp_write_wakeup(struct sock *); -extern void tcp_send_fin(struct sock *sk); -extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); -extern int tcp_send_synack(struct sock *); -extern void tcp_push_one(struct sock *, unsigned int mss_now); -extern void tcp_send_ack(struct sock *sk); -extern void tcp_send_delayed_ack(struct sock *sk); +void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, + int nonagle); +bool tcp_may_send_now(struct sock *sk); +int __tcp_retransmit_skb(struct sock *, struct sk_buff *); +int tcp_retransmit_skb(struct sock *, struct sk_buff *); +void tcp_retransmit_timer(struct sock *sk); +void tcp_xmit_retransmit_queue(struct sock *); +void tcp_simple_retransmit(struct sock *); +int tcp_trim_head(struct sock *, struct sk_buff *, u32); +int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t); + +void tcp_send_probe0(struct sock *); +void tcp_send_partial(struct sock *); +int tcp_write_wakeup(struct sock *); +void tcp_send_fin(struct sock *sk); +void tcp_send_active_reset(struct sock *sk, gfp_t priority); +int tcp_send_synack(struct sock *); +bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb, + const char *proto); +void tcp_push_one(struct sock *, unsigned int mss_now); +void tcp_send_ack(struct sock *sk); +void tcp_send_delayed_ack(struct sock *sk); +void tcp_send_loss_probe(struct sock *sk); +bool tcp_schedule_loss_probe(struct sock *sk); /* tcp_input.c */ -extern void tcp_cwnd_application_limited(struct sock *sk); +void tcp_resume_early_retransmit(struct sock *sk); +void tcp_rearm_rto(struct sock *sk); +void tcp_reset(struct sock *sk); /* tcp_timer.c */ -extern void tcp_init_xmit_timers(struct sock *); +void tcp_init_xmit_timers(struct sock *); static inline void tcp_clear_xmit_timers(struct sock *sk) { inet_csk_clear_xmit_timers(sk); } -extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); -extern unsigned int tcp_current_mss(struct sock *sk, int large); +unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); +unsigned int tcp_current_mss(struct sock *sk); + +/* Bound MSS / TSO packet size with the half of the window */ +static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) +{ + int cutoff; + + /* When peer uses tiny windows, there is no use in packetizing + * to sub-MSS pieces for the sake of SWS or making sure there + * are enough packets in the pipe for fast recovery. + * + * On the other hand, for extremely large MSS devices, handling + * smaller than MSS windows in this way does make sense. 
+ */ + if (tp->max_window >= 512) + cutoff = (tp->max_window >> 1); + else + cutoff = tp->max_window; + + if (cutoff && pktsize > cutoff) + return max_t(int, cutoff, 68U - tp->tcp_header_len); + else + return pktsize; +} /* tcp.c */ -extern void tcp_get_info(struct sock *, struct tcp_info *); +void tcp_get_info(const struct sock *, struct tcp_info *); /* Read 'sendfile()'-style from a TCP socket */ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); -extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, - sk_read_actor_t recv_actor); +int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor); -extern void tcp_initialize_rcv_mss(struct sock *sk); +void tcp_initialize_rcv_mss(struct sock *sk); -extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); -extern int tcp_mss_to_mtu(struct sock *sk, int mss); -extern void tcp_mtup_init(struct sock *sk); +int tcp_mtu_to_mss(struct sock *sk, int pmtu); +int tcp_mss_to_mtu(struct sock *sk, int mss); +void tcp_mtup_init(struct sock *sk); +void tcp_init_buffer_space(struct sock *sk); + +static inline void tcp_bound_rto(const struct sock *sk) +{ + if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) + inet_csk(sk)->icsk_rto = TCP_RTO_MAX; +} + +static inline u32 __tcp_set_rto(const struct tcp_sock *tp) +{ + return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us); +} static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) { @@ -511,6 +639,22 @@ static inline void tcp_fast_path_check(struct sock *sk) tcp_fast_path_on(tp); } +/* Compute the actual rto_min value */ +static inline u32 tcp_rto_min(struct sock *sk) +{ + const struct dst_entry *dst = __sk_dst_get(sk); + u32 rto_min = TCP_RTO_MIN; + + if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) + rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); + return rto_min; +} + +static inline u32 tcp_rto_min_us(struct sock *sk) +{ + return jiffies_to_usecs(tcp_rto_min(sk)); +} + /* Compute the actual receive window we are currently advertising. * Rcv_nxt can be after the window if our peer push more data * than the offered window. @@ -528,7 +672,9 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp) * scaling applied to the result. The caller does these things * if necessary. This is a "raw" window selection. */ -extern u32 __tcp_select_window(struct sock *sk); +u32 __tcp_select_window(struct sock *sk); + +void tcp_send_window_probe(struct sock *sk); /* TCP timestamps are only 32-bits, this causes a slight * complication on 64-bit systems since we store a snapshot @@ -538,52 +684,67 @@ extern u32 __tcp_select_window(struct sock *sk); */ #define tcp_time_stamp ((__u32)(jiffies)) +#define tcp_flag_byte(th) (((u_int8_t *)th)[13]) + +#define TCPHDR_FIN 0x01 +#define TCPHDR_SYN 0x02 +#define TCPHDR_RST 0x04 +#define TCPHDR_PSH 0x08 +#define TCPHDR_ACK 0x10 +#define TCPHDR_URG 0x20 +#define TCPHDR_ECE 0x40 +#define TCPHDR_CWR 0x80 + /* This is what the send packet queuing engine uses to pass - * TCP per-packet control information to the transmission - * code. We also store the host-order sequence numbers in - * here too. This is 36 bytes on 32-bit architectures, - * 40 bytes on 64-bit machines, if this grows please adjust - * skbuff.h:skbuff->cb[xxx] size appropriately. + * TCP per-packet control information to the transmission code. + * We also store the host-order sequence numbers in here too. + * This is 44 bytes if IPV6 is enabled. + * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately. 
*/ struct tcp_skb_cb { union { struct inet_skb_parm h4; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; /* For incoming frames */ __u32 seq; /* Starting sequence number */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */ __u32 when; /* used to compute rtt's */ - __u8 flags; /* TCP header flags. */ - - /* NOTE: These must match up to the flags byte in a - * real TCP header. - */ -#define TCPCB_FLAG_FIN 0x01 -#define TCPCB_FLAG_SYN 0x02 -#define TCPCB_FLAG_RST 0x04 -#define TCPCB_FLAG_PSH 0x08 -#define TCPCB_FLAG_ACK 0x10 -#define TCPCB_FLAG_URG 0x20 -#define TCPCB_FLAG_ECE 0x40 -#define TCPCB_FLAG_CWR 0x80 + __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK/FACK. */ #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ #define TCPCB_LOST 0x04 /* SKB is lost */ #define TCPCB_TAGBITS 0x07 /* All tag bits */ - #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) - __u16 urg_ptr; /* Valid w/URG flags is set. */ + __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ + /* 1 byte hole */ __u32 ack_seq; /* Sequence number ACK'd */ }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) +/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set + * + * If we receive a SYN packet with these bits set, it means a network is + * playing bad games with TOS bits. In order to avoid possible false congestion + * notifications, we disable TCP ECN negociation. + */ +static inline void +TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb, + struct net *net) +{ + const struct tcphdr *th = tcp_hdr(skb); + + if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr && + INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield)) + inet_rsk(req)->ecn_ok = 1; +} + /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. 
*/ @@ -598,27 +759,11 @@ static inline int tcp_skb_mss(const struct sk_buff *skb) return skb_shinfo(skb)->gso_size; } -static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr) -{ - if (*count) { - *count -= decr; - if ((int)*count < 0) - *count = 0; - } -} - -static inline void tcp_dec_pcount_approx(__u32 *count, - const struct sk_buff *skb) -{ - tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb)); -} - /* Events passed to congestion control interface */ enum tcp_ca_event { CA_EVENT_TX_START, /* first transmit when no packets in flight */ CA_EVENT_CWND_RESTART, /* congestion window restart */ CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ - CA_EVENT_FRTO, /* fast recovery timeout */ CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_FAST_ACK, /* in sequence ack */ CA_EVENT_SLOW_ACK, /* other ack */ @@ -632,7 +777,6 @@ enum tcp_ca_event { #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX) #define TCP_CONG_NON_RESTRICTED 0x1 -#define TCP_CONG_RTT_STAMP 0x2 struct tcp_congestion_ops { struct list_head list; @@ -645,10 +789,8 @@ struct tcp_congestion_ops { /* return slow start threshold (required) */ u32 (*ssthresh)(struct sock *sk); - /* lower bound for congestion window (optional) */ - u32 (*min_cwnd)(const struct sock *sk); /* do new cwnd calculation (required) */ - void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight); + void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked); /* call before changing ca_state (optional) */ void (*set_state)(struct sock *sk, u8 new_state); /* call when cwnd event occurs (optional) */ @@ -664,23 +806,23 @@ struct tcp_congestion_ops { struct module *owner; }; -extern int tcp_register_congestion_control(struct tcp_congestion_ops *type); -extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); +int tcp_register_congestion_control(struct tcp_congestion_ops *type); +void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); -extern void tcp_init_congestion_control(struct sock *sk); -extern void tcp_cleanup_congestion_control(struct sock *sk); -extern int tcp_set_default_congestion_control(const char *name); -extern void tcp_get_default_congestion_control(char *name); -extern void tcp_get_available_congestion_control(char *buf, size_t len); -extern void tcp_get_allowed_congestion_control(char *buf, size_t len); -extern int tcp_set_allowed_congestion_control(char *allowed); -extern int tcp_set_congestion_control(struct sock *sk, const char *name); -extern void tcp_slow_start(struct tcp_sock *tp); +void tcp_init_congestion_control(struct sock *sk); +void tcp_cleanup_congestion_control(struct sock *sk); +int tcp_set_default_congestion_control(const char *name); +void tcp_get_default_congestion_control(char *name); +void tcp_get_available_congestion_control(char *buf, size_t len); +void tcp_get_allowed_congestion_control(char *buf, size_t len); +int tcp_set_allowed_congestion_control(char *allowed); +int tcp_set_congestion_control(struct sock *sk, const char *name); +int tcp_slow_start(struct tcp_sock *tp, u32 acked); +void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); extern struct tcp_congestion_ops tcp_init_congestion_ops; -extern u32 tcp_reno_ssthresh(struct sock *sk); -extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight); -extern u32 tcp_reno_min_cwnd(const struct sock *sk); +u32 tcp_reno_ssthresh(struct sock *sk); +void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); extern struct tcp_congestion_ops tcp_reno; static inline void tcp_set_ca_state(struct sock *sk, const 
u8 ca_state) @@ -713,19 +855,34 @@ static inline int tcp_is_sack(const struct tcp_sock *tp) return tp->rx_opt.sack_ok; } -static inline int tcp_is_reno(const struct tcp_sock *tp) +static inline bool tcp_is_reno(const struct tcp_sock *tp) { return !tcp_is_sack(tp); } -static inline int tcp_is_fack(const struct tcp_sock *tp) +static inline bool tcp_is_fack(const struct tcp_sock *tp) { - return tp->rx_opt.sack_ok & 2; + return tp->rx_opt.sack_ok & TCP_FACK_ENABLED; } static inline void tcp_enable_fack(struct tcp_sock *tp) { - tp->rx_opt.sack_ok |= 2; + tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; +} + +/* TCP early-retransmit (ER) is similar to but more conservative than + * the thin-dupack feature. Enable ER only if thin-dupack is disabled. + */ +static inline void tcp_enable_early_retrans(struct tcp_sock *tp) +{ + tp->do_early_retrans = sysctl_tcp_early_retrans && + sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && + sysctl_tcp_reordering == 3; +} + +static inline void tcp_disable_early_retrans(struct tcp_sock *tp) +{ + tp->do_early_retrans = 0; } static inline unsigned int tcp_left_out(const struct tcp_sock *tp) @@ -752,14 +909,28 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) return tp->packets_out - tcp_left_out(tp) + tp->retrans_out; } +#define TCP_INFINITE_SSTHRESH 0x7fffffff + +static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) +{ + return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static inline bool tcp_in_cwnd_reduction(const struct sock *sk) +{ + return (TCPF_CA_CWR | TCPF_CA_Recovery) & + (1 << inet_csk(sk)->icsk_ca_state); +} + /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. - * The exception is rate halving phase, when cwnd is decreasing towards + * The exception is cwnd reduction phase, when cwnd is decreasing towards * ssthresh. */ static inline __u32 tcp_current_ssthresh(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery)) + + if (tcp_in_cwnd_reduction(sk)) return tp->snd_ssthresh; else return max(tp->snd_ssthresh, @@ -770,15 +941,26 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) /* Use define here intentionally to get WARN_ON location shown at the caller */ #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) -extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); -extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); +void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); +__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); + +/* The maximum number of MSS of available cwnd for which TSO defers + * sending if not using sysctl_tcp_tso_win_divisor. + */ +static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp) +{ + return 3; +} /* Slow start with delack produces 3 packets of burst, so that - * it is safe "de facto". + * it is safe "de facto". This will be the default - same as + * the default reordering threshold - but if reordering increases, + * we must be able to allow cwnd to burst at least this much in order + * to not pull it back when holes are filled. 
*/ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) { - return 3; + return tp->reordering; } /* Returns end sequence number of the receiver's advertised window */ @@ -786,18 +968,34 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp) { return tp->snd_una + tp->snd_wnd; } -extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); -static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss, - const struct sk_buff *skb) +/* We follow the spirit of RFC2861 to validate cwnd but implement a more + * flexible approach. The RFC suggests cwnd should not be raised unless + * it was fully used previously. And that's exactly what we do in + * congestion avoidance mode. But in slow start we allow cwnd to grow + * as long as the application has used half the cwnd. + * Example : + * cwnd is 10 (IW10), but application sends 9 frames. + * We allow cwnd to reach 18 when all frames are ACKed. + * This check is safe because it's as aggressive as slow start which already + * risks 100% overshoot. The advantage is that we discourage application to + * either send more filler packets or data to artificially blow up the cwnd + * usage, and allow application-limited process to probe bw more aggressively. + */ +static inline bool tcp_is_cwnd_limited(const struct sock *sk) { - if (skb->len < mss) - tp->snd_sml = TCP_SKB_CB(skb)->end_seq; + const struct tcp_sock *tp = tcp_sk(sk); + + /* If in slow start, ensure cwnd grows to twice what was ACKed. */ + if (tp->snd_cwnd <= tp->snd_ssthresh) + return tp->snd_cwnd < 2 * tp->max_packets_out; + + return tp->is_cwnd_limited; } static inline void tcp_check_probe_timer(struct sock *sk) { - struct tcp_sock *tp = tcp_sk(sk); + const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); if (!tp->packets_out && !icsk->icsk_pending) @@ -805,19 +1003,12 @@ static inline void tcp_check_probe_timer(struct sock *sk) icsk->icsk_rto, TCP_RTO_MAX); } -static inline void tcp_push_pending_frames(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - __tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle); -} - -static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq) +static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) { tp->snd_wl1 = seq; } -static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq) +static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq) { tp->snd_wl1 = seq; } @@ -836,7 +1027,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) return __skb_checksum_complete(skb); } -static inline int tcp_checksum_complete(struct sk_buff *skb) +static inline bool tcp_checksum_complete(struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete(skb); @@ -858,44 +1049,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) #endif } -/* Packet is added to VJ-style prequeue for processing in process - * context, if a reader task is waiting. Apparently, this exciting - * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) - * failed somewhere. Latency? Burstiness? Well, at least now we will - * see, why it failed. 8)8) --ANK - * - * NOTE: is this not too big to inline? 
- */ -static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (!sysctl_tcp_low_latency && tp->ucopy.task) { - __skb_queue_tail(&tp->ucopy.prequeue, skb); - tp->ucopy.memory += skb->truesize; - if (tp->ucopy.memory > sk->sk_rcvbuf) { - struct sk_buff *skb1; - - BUG_ON(sock_owned_by_user(sk)); - - while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { - sk->sk_backlog_rcv(sk, skb1); - NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED); - } - - tp->ucopy.memory = 0; - } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { - wake_up_interruptible(sk->sk_sleep); - if (!inet_csk_ack_scheduled(sk)) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - (3 * TCP_RTO_MIN) / 4, - TCP_RTO_MAX); - } - return 1; - } - return 0; -} - +bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); #undef STATE_TRACE @@ -906,21 +1060,22 @@ static const char *statename[]={ "Close Wait","Last ACK","Listen","Closing" }; #endif -extern void tcp_set_state(struct sock *sk, int state); +void tcp_set_state(struct sock *sk, int state); -extern void tcp_done(struct sock *sk); +void tcp_done(struct sock *sk); static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) { rx_opt->dsack = 0; - rx_opt->eff_sacks = 0; rx_opt->num_sacks = 0; } +u32 tcp_default_init_rwnd(u32 mss); + /* Determine a window scaling and initial window to offer. */ -extern void tcp_select_initial_window(int __space, __u32 mss, - __u32 *rcv_wnd, __u32 *window_clamp, - int wscale_ok, __u8 *rcv_wscale); +void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, + __u32 *window_clamp, int wscale_ok, + __u8 *rcv_wscale, __u32 init_rcv_wnd); static inline int tcp_win_from_space(int space) { @@ -948,7 +1103,10 @@ static inline void tcp_openreq_init(struct request_sock *req, struct inet_request_sock *ireq = inet_rsk(req); req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ + req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; + tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; + tcp_rsk(req)->snt_synack = 0; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; ireq->tstamp_ok = rx_opt->tstamp_ok; @@ -957,10 +1115,14 @@ static inline void tcp_openreq_init(struct request_sock *req, ireq->wscale_ok = rx_opt->wscale_ok; ireq->acked = 0; ireq->ecn_ok = 0; - ireq->rmt_port = tcp_hdr(skb)->source; + ireq->ir_rmt_port = tcp_hdr(skb)->source; + ireq->ir_num = ntohs(tcp_hdr(skb)->dest); } -extern void tcp_enter_memory_pressure(void); +extern void tcp_openreq_init_rwin(struct request_sock *req, + struct sock *sk, struct dst_entry *dst); + +void tcp_enter_memory_pressure(struct sock *sk); static inline int keepalive_intvl_when(const struct tcp_sock *tp) { @@ -972,6 +1134,19 @@ static inline int keepalive_time_when(const struct tcp_sock *tp) return tp->keepalive_time ? : sysctl_tcp_keepalive_time; } +static inline int keepalive_probes(const struct tcp_sock *tp) +{ + return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; +} + +static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) +{ + const struct inet_connection_sock *icsk = &tp->inet_conn; + + return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime, + tcp_time_stamp - tp->rcv_tstamp); +} + static inline int tcp_fin_time(const struct sock *sk) { int fin_timeout = tcp_sk(sk)->linger2 ? 
: sysctl_tcp_fin_timeout; @@ -983,12 +1158,28 @@ static inline int tcp_fin_time(const struct sock *sk) return fin_timeout; } -static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst) +static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, + int paws_win) { - if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0) - return 0; - if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS) - return 0; + if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) + return true; + if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) + return true; + /* + * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, + * then following tcp messages have valid values. Ignore 0 value, + * or else 'negative' tsval might forbid us to accept their packets. + */ + if (!rx_opt->ts_recent) + return true; + return false; +} + +static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, + int rst) +{ + if (tcp_paws_check(rx_opt, 0)) + return false; /* RST segments are not recommended to carry timestamp, and, if they do, it is recommended to ignore PAWS because @@ -1003,67 +1194,55 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int However, we can relax time bounds for RST segments to MSL. */ if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) - return 0; - return 1; + return false; + return true; } -#define TCP_CHECK_TIMER(sk) do { } while (0) - -static inline void tcp_mib_init(void) +static inline void tcp_mib_init(struct net *net) { /* See RFC 2012 */ - TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1); - TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); - TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); - TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1); + TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1); + TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); + TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); + TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1); } /* from STCP */ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) { tp->lost_skb_hint = NULL; - tp->scoreboard_skb_hint = NULL; - tp->retransmit_skb_hint = NULL; - tp->forward_skb_hint = NULL; } static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) { tcp_clear_retrans_hints_partial(tp); + tp->retransmit_skb_hint = NULL; } /* MD5 Signature */ struct crypto_hash; +union tcp_md5_addr { + struct in_addr a4; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr a6; +#endif +}; + /* - key database */ struct tcp_md5sig_key { - u8 *key; + struct hlist_node node; u8 keylen; -}; - -struct tcp4_md5sig_key { - struct tcp_md5sig_key base; - __be32 addr; -}; - -struct tcp6_md5sig_key { - struct tcp_md5sig_key base; -#if 0 - u32 scope_id; /* XXX */ -#endif - struct in6_addr addr; + u8 family; /* AF_INET or AF_INET6 */ + union tcp_md5_addr addr; + u8 key[TCP_MD5SIG_MAXKEYLEN]; + struct rcu_head rcu; }; /* - sock block */ struct tcp_md5sig_info { - struct tcp4_md5sig_key *keys4; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - struct tcp6_md5sig_key *keys6; - u32 entries6; - u32 alloced6; -#endif - u32 entries4; - u32 alloced4; + struct hlist_head head; + struct rcu_head rcu; }; /* - pseudo header */ @@ -1084,7 +1263,7 @@ struct tcp6_pseudohdr { union tcp_md5sum_block { struct tcp4_pseudohdr ip4; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct tcp6_pseudohdr ip6; #endif }; @@ -1095,50 +1274,77 @@ struct tcp_md5sig_pool { 
union tcp_md5sum_block md5_blk; }; -#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */ - /* - functions */ -extern int tcp_v4_calc_md5_hash(char *md5_hash, - struct tcp_md5sig_key *key, - struct sock *sk, - struct dst_entry *dst, - struct request_sock *req, - struct tcphdr *th, - int protocol, - unsigned int tcplen); -extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, - struct sock *addr_sk); - -extern int tcp_v4_md5_do_add(struct sock *sk, - __be32 addr, - u8 *newkey, - u8 newkeylen); - -extern int tcp_v4_md5_do_del(struct sock *sk, - __be32 addr); - -extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void); -extern void tcp_free_md5sig_pool(void); +int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, + const struct sock *sk, const struct request_sock *req, + const struct sk_buff *skb); +int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, + int family, const u8 *newkey, u8 newkeylen, gfp_t gfp); +int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, + int family); +struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, + struct sock *addr_sk); -extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu); -extern void __tcp_put_md5sig_pool(void); - -static inline -struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) +#ifdef CONFIG_TCP_MD5SIG +struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, + const union tcp_md5_addr *addr, + int family); +#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) +#else +static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, + const union tcp_md5_addr *addr, + int family) { - int cpu = get_cpu(); - struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu); - if (!ret) - put_cpu(); - return ret; + return NULL; } +#define tcp_twsk_md5_key(twsk) NULL +#endif -static inline void tcp_put_md5sig_pool(void) +bool tcp_alloc_md5sig_pool(void); + +struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); +static inline void tcp_put_md5sig_pool(void) { - __tcp_put_md5sig_pool(); - put_cpu(); + local_bh_enable(); } +int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *); +int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, + unsigned int header_len); +int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, + const struct tcp_md5sig_key *key); + +/* From tcp_fastopen.c */ +void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, + struct tcp_fastopen_cookie *cookie, int *syn_loss, + unsigned long *last_syn_loss); +void tcp_fastopen_cache_set(struct sock *sk, u16 mss, + struct tcp_fastopen_cookie *cookie, bool syn_lost); +struct tcp_fastopen_request { + /* Fast Open cookie. 
Size 0 means a cookie request */ + struct tcp_fastopen_cookie cookie; + struct msghdr *data; /* data in MSG_FASTOPEN */ + size_t size; + int copied; /* queued in tcp_connect() */ +}; +void tcp_free_fastopen_req(struct tcp_sock *tp); + +extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; +int tcp_fastopen_reset_cipher(void *key, unsigned int len); +bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct tcp_fastopen_cookie *foc, + struct dst_entry *dst); +void tcp_fastopen_init_key_once(bool publish); +#define TCP_FASTOPEN_KEY_LENGTH 16 + +/* Fastopen key context */ +struct tcp_fastopen_context { + struct crypto_cipher *tfm; + __u8 key[TCP_FASTOPEN_KEY_LENGTH]; + struct rcu_head rcu; +}; + /* write queue abstraction */ static inline void tcp_write_queue_purge(struct sock *sk) { @@ -1147,53 +1353,57 @@ static inline void tcp_write_queue_purge(struct sock *sk) while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) sk_wmem_free_skb(sk, skb); sk_mem_reclaim(sk); + tcp_clear_all_retrans_hints(tcp_sk(sk)); +} + +static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) +{ + return skb_peek(&sk->sk_write_queue); } -static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) +static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) { - struct sk_buff *skb = sk->sk_write_queue.next; - if (skb == (struct sk_buff *) &sk->sk_write_queue) - return NULL; - return skb; + return skb_peek_tail(&sk->sk_write_queue); } -static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) +static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk, + const struct sk_buff *skb) { - struct sk_buff *skb = sk->sk_write_queue.prev; - if (skb == (struct sk_buff *) &sk->sk_write_queue) - return NULL; - return skb; + return skb_queue_next(&sk->sk_write_queue, skb); } -static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) +static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk, + const struct sk_buff *skb) { - return skb->next; + return skb_queue_prev(&sk->sk_write_queue, skb); } #define tcp_for_write_queue(skb, sk) \ - for (skb = (sk)->sk_write_queue.next; \ - (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ - skb = skb->next) + skb_queue_walk(&(sk)->sk_write_queue, skb) #define tcp_for_write_queue_from(skb, sk) \ - for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\ - skb = skb->next) + skb_queue_walk_from(&(sk)->sk_write_queue, skb) #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ - for (tmp = skb->next; \ - (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ - skb = tmp, tmp = skb->next) + skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) -static inline struct sk_buff *tcp_send_head(struct sock *sk) +static inline struct sk_buff *tcp_send_head(const struct sock *sk) { return sk->sk_send_head; } -static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) +static inline bool tcp_skb_is_last(const struct sock *sk, + const struct sk_buff *skb) { - sk->sk_send_head = skb->next; - if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) + return skb_queue_is_last(&sk->sk_write_queue, skb); +} + +static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb) +{ + if (tcp_skb_is_last(sk, skb)) sk->sk_send_head = NULL; + else + sk->sk_send_head = tcp_write_queue_next(sk, skb); } static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) @@ -1235,15 +1445,15 @@ static 
inline void tcp_insert_write_queue_after(struct sk_buff *skb, struct sk_buff *buff, struct sock *sk) { - __skb_append(skb, buff, &sk->sk_write_queue); + __skb_queue_after(&sk->sk_write_queue, skb, buff); } -/* Insert skb between prev and next on the write queue of sk. */ +/* Insert new before skb on the write queue of sk. */ static inline void tcp_insert_write_queue_before(struct sk_buff *new, struct sk_buff *skb, struct sock *sk) { - __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); + __skb_queue_before(&sk->sk_write_queue, skb, new); if (sk->sk_send_head == skb) sk->sk_send_head = new; @@ -1254,19 +1464,23 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) __skb_unlink(skb, &sk->sk_write_queue); } -static inline int tcp_skb_is_last(const struct sock *sk, - const struct sk_buff *skb) +static inline bool tcp_write_queue_empty(struct sock *sk) { - return skb->next == (struct sk_buff *)&sk->sk_write_queue; + return skb_queue_empty(&sk->sk_write_queue); } -static inline int tcp_write_queue_empty(struct sock *sk) +static inline void tcp_push_pending_frames(struct sock *sk) { - return skb_queue_empty(&sk->sk_write_queue); + if (tcp_send_head(sk)) { + struct tcp_sock *tp = tcp_sk(sk); + + __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); + } } -/* Start sequence of the highest skb with SACKed bit, valid only if - * sacked > 0 or when the caller has ensured validity by itself. +/* Start sequence of the skb just after the highest skb with SACKed + * bit, valid only if sacked_out > 0 or when the caller has ensured + * validity by itself. */ static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp) { @@ -1304,43 +1518,71 @@ static inline void tcp_highest_sack_combine(struct sock *sk, tcp_sk(sk)->highest_sack = new; } +/* Determines whether this is a thin stream (which may suffer from + * increased latency). Used to trigger latency-reducing mechanisms. 
+ */ +static inline bool tcp_stream_is_thin(struct tcp_sock *tp) +{ + return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp); +} + /* /proc */ enum tcp_seq_states { TCP_SEQ_STATE_LISTENING, TCP_SEQ_STATE_OPENREQ, TCP_SEQ_STATE_ESTABLISHED, - TCP_SEQ_STATE_TIME_WAIT, }; +int tcp_seq_open(struct inode *inode, struct file *file); + struct tcp_seq_afinfo { - struct module *owner; - char *name; - sa_family_t family; - int (*seq_show) (struct seq_file *m, void *v); - struct file_operations *seq_fops; + char *name; + sa_family_t family; + const struct file_operations *seq_fops; + struct seq_operations seq_ops; }; struct tcp_iter_state { + struct seq_net_private p; sa_family_t family; enum tcp_seq_states state; struct sock *syn_wait_sk; - int bucket, sbucket, num, uid; - struct seq_operations seq_ops; + int bucket, offset, sbucket, num; + kuid_t uid; + loff_t last_pos; }; -extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo); -extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo); +int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo); +void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo); extern struct request_sock_ops tcp_request_sock_ops; +extern struct request_sock_ops tcp6_request_sock_ops; + +void tcp_v4_destroy_sock(struct sock *sk); -extern int tcp_v4_destroy_sock(struct sock *sk); +struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + netdev_features_t features); +struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); +int tcp_gro_complete(struct sk_buff *skb); -extern int tcp_v4_gso_send_check(struct sk_buff *skb); -extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); +void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); + +static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) +{ + return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat; +} + +static inline bool tcp_stream_memory_free(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + u32 notsent_bytes = tp->write_seq - tp->snd_nxt; + + return notsent_bytes < tcp_notsent_lowat(tp); +} #ifdef CONFIG_PROC_FS -extern int tcp4_proc_init(void); -extern void tcp4_proc_exit(void); +int tcp4_proc_init(void); +void tcp4_proc_exit(void); #endif /* TCP af-specific functions */ @@ -1350,16 +1592,9 @@ struct tcp_sock_af_ops { struct sock *addr_sk); int (*calc_md5_hash) (char *location, struct tcp_md5sig_key *md5, - struct sock *sk, - struct dst_entry *dst, - struct request_sock *req, - struct tcphdr *th, - int protocol, - unsigned int len); - int (*md5_add) (struct sock *sk, - struct sock *addr_sk, - u8 *newkey, - u8 len); + const struct sock *sk, + const struct request_sock *req, + const struct sk_buff *skb); int (*md5_parse) (struct sock *sk, char __user *optval, int optlen); @@ -1370,10 +1605,17 @@ struct tcp_request_sock_ops { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, struct request_sock *req); + int (*calc_md5_hash) (char *location, + struct tcp_md5sig_key *md5, + const struct sock *sk, + const struct request_sock *req, + const struct sk_buff *skb); #endif }; -extern void tcp_v4_init(struct net_proto_family *ops); -extern void tcp_init(void); +int tcpv4_offload_init(void); + +void tcp_v4_init(void); +void tcp_init(void); #endif /* _TCP_H */ diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h new file mode 100644 index 00000000000..05b94d9453d --- /dev/null +++ b/include/net/tcp_memcontrol.h @@ -0,0 +1,7 @@ +#ifndef _TCP_MEMCG_H +#define 
_TCP_MEMCG_H + +struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg); +int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss); +void tcp_destroy_cgroup(struct mem_cgroup *memcg); +#endif /* _TCP_MEMCG_H */ diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h index 1e1ee3253fd..68f0ecad6c6 100644 --- a/include/net/timewait_sock.h +++ b/include/net/timewait_sock.h @@ -12,10 +12,12 @@ #define _TIMEWAIT_SOCK_H #include <linux/slab.h> +#include <linux/bug.h> #include <net/sock.h> struct timewait_sock_ops { struct kmem_cache *twsk_slab; + char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *sk, struct sock *sktw, void *twp); diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h deleted file mode 100644 index 9566608c88c..00000000000 --- a/include/net/tipc/tipc.h +++ /dev/null @@ -1,257 +0,0 @@ -/* - * include/net/tipc/tipc.h: Main include file for TIPC users - * - * Copyright (c) 2003-2006, Ericsson AB - * Copyright (c) 2005, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _NET_TIPC_H_ -#define _NET_TIPC_H_ - -#ifdef __KERNEL__ - -#include <linux/tipc.h> -#include <linux/skbuff.h> - -/* - * Native API - */ - -/* - * TIPC operating mode routines - */ - -u32 tipc_get_addr(void); - -#define TIPC_NOT_RUNNING 0 -#define TIPC_NODE_MODE 1 -#define TIPC_NET_MODE 2 - -typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr); - -int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle); - -void tipc_detach(unsigned int userref); - -int tipc_get_mode(void); - -/* - * TIPC port manipulation routines - */ - -typedef void (*tipc_msg_err_event) (void *usr_handle, - u32 portref, - struct sk_buff **buf, - unsigned char const *data, - unsigned int size, - int reason, - struct tipc_portid const *attmpt_destid); - -typedef void (*tipc_named_msg_err_event) (void *usr_handle, - u32 portref, - struct sk_buff **buf, - unsigned char const *data, - unsigned int size, - int reason, - struct tipc_name_seq const *attmpt_dest); - -typedef void (*tipc_conn_shutdown_event) (void *usr_handle, - u32 portref, - struct sk_buff **buf, - unsigned char const *data, - unsigned int size, - int reason); - -typedef void (*tipc_msg_event) (void *usr_handle, - u32 portref, - struct sk_buff **buf, - unsigned char const *data, - unsigned int size, - unsigned int importance, - struct tipc_portid const *origin); - -typedef void (*tipc_named_msg_event) (void *usr_handle, - u32 portref, - struct sk_buff **buf, - unsigned char const *data, - unsigned int size, - unsigned int importance, - struct tipc_portid const *orig, - struct tipc_name_seq const *dest); - -typedef void (*tipc_conn_msg_event) (void *usr_handle, - u32 portref, - struct sk_buff **buf, - unsigned char const *data, - unsigned int size); - -typedef void (*tipc_continue_event) (void *usr_handle, - u32 portref); - -int tipc_createport(unsigned int tipc_user, - void *usr_handle, - unsigned int importance, - tipc_msg_err_event error_cb, - tipc_named_msg_err_event named_error_cb, - tipc_conn_shutdown_event conn_error_cb, - tipc_msg_event message_cb, - tipc_named_msg_event named_message_cb, - tipc_conn_msg_event conn_message_cb, - tipc_continue_event continue_event_cb,/* May be zero */ - u32 *portref); - -int tipc_deleteport(u32 portref); - -int tipc_ownidentity(u32 portref, struct tipc_portid *port); - -int tipc_portimportance(u32 portref, unsigned int *importance); -int tipc_set_portimportance(u32 portref, unsigned int importance); - -int tipc_portunreliable(u32 portref, unsigned int *isunreliable); -int tipc_set_portunreliable(u32 portref, unsigned int isunreliable); - -int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); -int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); - -int tipc_publish(u32 portref, unsigned int scope, - struct tipc_name_seq const *name_seq); -int tipc_withdraw(u32 portref, unsigned int scope, - struct tipc_name_seq const *name_seq); /* 0: all */ - -int tipc_connect2port(u32 portref, struct tipc_portid const *port); - -int tipc_disconnect(u32 portref); - -int tipc_shutdown(u32 ref); /* Sends SHUTDOWN msg */ - -int tipc_isconnected(u32 portref, int *isconnected); - -int tipc_peer(u32 portref, struct tipc_portid *peer); - -int tipc_ref_valid(u32 portref); - -/* - * TIPC messaging routines - */ - -#define TIPC_PORT_IMPORTANCE 100 /* send using current port setting */ - - -int tipc_send(u32 portref, - unsigned int num_sect, - struct iovec const *msg_sect); - -int tipc_send_buf(u32 portref, - struct sk_buff *buf, - unsigned int dsz); - -int 
tipc_send2name(u32 portref, - struct tipc_name const *name, - u32 domain, /* 0:own zone */ - unsigned int num_sect, - struct iovec const *msg_sect); - -int tipc_send_buf2name(u32 portref, - struct tipc_name const *name, - u32 domain, - struct sk_buff *buf, - unsigned int dsz); - -int tipc_forward2name(u32 portref, - struct tipc_name const *name, - u32 domain, /*0: own zone */ - unsigned int section_count, - struct iovec const *msg_sect, - struct tipc_portid const *origin, - unsigned int importance); - -int tipc_forward_buf2name(u32 portref, - struct tipc_name const *name, - u32 domain, - struct sk_buff *buf, - unsigned int dsz, - struct tipc_portid const *orig, - unsigned int importance); - -int tipc_send2port(u32 portref, - struct tipc_portid const *dest, - unsigned int num_sect, - struct iovec const *msg_sect); - -int tipc_send_buf2port(u32 portref, - struct tipc_portid const *dest, - struct sk_buff *buf, - unsigned int dsz); - -int tipc_forward2port(u32 portref, - struct tipc_portid const *dest, - unsigned int num_sect, - struct iovec const *msg_sect, - struct tipc_portid const *origin, - unsigned int importance); - -int tipc_forward_buf2port(u32 portref, - struct tipc_portid const *dest, - struct sk_buff *buf, - unsigned int dsz, - struct tipc_portid const *orig, - unsigned int importance); - -int tipc_multicast(u32 portref, - struct tipc_name_seq const *seq, - u32 domain, /* 0:own zone */ - unsigned int section_count, - struct iovec const *msg); - -#if 0 -int tipc_multicast_buf(u32 portref, - struct tipc_name_seq const *seq, - u32 domain, /* 0:own zone */ - void *buf, - unsigned int size); -#endif - -/* - * TIPC subscription routines - */ - -int tipc_ispublished(struct tipc_name const *name); - -/* - * Get number of available nodes within specified domain (excluding own node) - */ - -unsigned int tipc_available_nodes(const u32 domain); - -#endif - -#endif diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h deleted file mode 100644 index 2151a80cdf3..00000000000 --- a/include/net/tipc/tipc_bearer.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers - * - * Copyright (c) 2003-2006, Ericsson AB - * Copyright (c) 2005, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _NET_TIPC_BEARER_H_ -#define _NET_TIPC_BEARER_H_ - -#ifdef __KERNEL__ - -#include <linux/tipc_config.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> - -/* - * Identifiers of supported TIPC media types - */ - -#define TIPC_MEDIA_TYPE_ETH 1 - -/* - * Destination address structure used by TIPC bearers when sending messages - * - * IMPORTANT: The fields of this structure MUST be stored using the specified - * byte order indicated below, as the structure is exchanged between nodes - * as part of a link setup process. - */ - -struct tipc_media_addr { - __be32 type; /* bearer type (network byte order) */ - union { - __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */ -#if 0 - /* Prototypes for other possible bearer types */ - - struct { - __u16 sin_family; - __u16 sin_port; - struct { - __u32 s_addr; - } sin_addr; - char pad[4]; - } addr_in; /* IP-based bearer */ - __u16 sock_descr; /* generic socket bearer */ -#endif - } dev_addr; -}; - -/** - * struct tipc_bearer - TIPC bearer info available to privileged users - * @usr_handle: pointer to additional user-defined information about bearer - * @mtu: max packet size bearer can support - * @blocked: non-zero if bearer is blocked - * @lock: spinlock for controlling access to bearer - * @addr: media-specific address associated with bearer - * @name: bearer name (format = media:interface) - * - * Note: TIPC initializes "name" and "lock" fields; user is responsible for - * initialization all other fields when a bearer is enabled. - */ - -struct tipc_bearer { - void *usr_handle; - u32 mtu; - int blocked; - spinlock_t lock; - struct tipc_media_addr addr; - char name[TIPC_MAX_BEARER_NAME]; -}; - - -int tipc_register_media(u32 media_type, - char *media_name, - int (*enable)(struct tipc_bearer *), - void (*disable)(struct tipc_bearer *), - int (*send_msg)(struct sk_buff *, - struct tipc_bearer *, - struct tipc_media_addr *), - char *(*addr2str)(struct tipc_media_addr *a, - char *str_buf, - int str_size), - struct tipc_media_addr *bcast_addr, - const u32 bearer_priority, - const u32 link_tolerance, /* [ms] */ - const u32 send_window_limit); - -void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr); - -int tipc_block_bearer(const char *name); -void tipc_continue(struct tipc_bearer *tb_ptr); - -int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority); -int tipc_disable_bearer(const char *name); - - -#endif - -#endif diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h deleted file mode 100644 index 2e159a812f8..00000000000 --- a/include/net/tipc/tipc_msg.h +++ /dev/null @@ -1,207 +0,0 @@ -/* - * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers - * - * Copyright (c) 2003-2006, Ericsson AB - * Copyright (c) 2005, Wind River Systems - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _NET_TIPC_MSG_H_ -#define _NET_TIPC_MSG_H_ - -#ifdef __KERNEL__ - -struct tipc_msg { - __be32 hdr[15]; -}; - - -/* - TIPC user data message header format, version 2: - - - 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w0:|vers | user |hdr sz |n|d|s|-| message size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w2:| link level ack no | broadcast/link level seq no | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w3:| previous node | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w4:| originating port | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w5:| destination port | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w6:| originating node | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w7:| destination node | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w8:| name type / transport sequence number | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - w9:| name instance/multicast lower bound | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - wA:| multicast upper bound | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / / - \ options \ - / / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - -*/ - -#define TIPC_CONN_MSG 0 -#define TIPC_MCAST_MSG 1 -#define TIPC_NAMED_MSG 2 -#define TIPC_DIRECT_MSG 3 - - -static inline u32 msg_word(struct tipc_msg *m, u32 pos) -{ - return ntohl(m->hdr[pos]); -} - -static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 
mask) -{ - return (msg_word(m, w) >> pos) & mask; -} - -static inline u32 msg_importance(struct tipc_msg *m) -{ - return msg_bits(m, 0, 25, 0xf); -} - -static inline u32 msg_hdr_sz(struct tipc_msg *m) -{ - return msg_bits(m, 0, 21, 0xf) << 2; -} - -static inline int msg_short(struct tipc_msg *m) -{ - return (msg_hdr_sz(m) == 24); -} - -static inline u32 msg_size(struct tipc_msg *m) -{ - return msg_bits(m, 0, 0, 0x1ffff); -} - -static inline u32 msg_data_sz(struct tipc_msg *m) -{ - return (msg_size(m) - msg_hdr_sz(m)); -} - -static inline unchar *msg_data(struct tipc_msg *m) -{ - return ((unchar *)m) + msg_hdr_sz(m); -} - -static inline u32 msg_type(struct tipc_msg *m) -{ - return msg_bits(m, 1, 29, 0x7); -} - -static inline u32 msg_named(struct tipc_msg *m) -{ - return (msg_type(m) == TIPC_NAMED_MSG); -} - -static inline u32 msg_mcast(struct tipc_msg *m) -{ - return (msg_type(m) == TIPC_MCAST_MSG); -} - -static inline u32 msg_connected(struct tipc_msg *m) -{ - return (msg_type(m) == TIPC_CONN_MSG); -} - -static inline u32 msg_errcode(struct tipc_msg *m) -{ - return msg_bits(m, 1, 25, 0xf); -} - -static inline u32 msg_prevnode(struct tipc_msg *m) -{ - return msg_word(m, 3); -} - -static inline u32 msg_origport(struct tipc_msg *m) -{ - return msg_word(m, 4); -} - -static inline u32 msg_destport(struct tipc_msg *m) -{ - return msg_word(m, 5); -} - -static inline u32 msg_mc_netid(struct tipc_msg *m) -{ - return msg_word(m, 5); -} - -static inline u32 msg_orignode(struct tipc_msg *m) -{ - if (likely(msg_short(m))) - return msg_prevnode(m); - return msg_word(m, 6); -} - -static inline u32 msg_destnode(struct tipc_msg *m) -{ - return msg_word(m, 7); -} - -static inline u32 msg_nametype(struct tipc_msg *m) -{ - return msg_word(m, 8); -} - -static inline u32 msg_nameinst(struct tipc_msg *m) -{ - return msg_word(m, 9); -} - -static inline u32 msg_namelower(struct tipc_msg *m) -{ - return msg_nameinst(m); -} - -static inline u32 msg_nameupper(struct tipc_msg *m) -{ - return msg_word(m, 10); -} - -#endif - -#endif diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h deleted file mode 100644 index cfc4ba46de8..00000000000 --- a/include/net/tipc/tipc_port.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports - * - * Copyright (c) 1994-2007, Ericsson AB - * Copyright (c) 2005-2007, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _NET_TIPC_PORT_H_ -#define _NET_TIPC_PORT_H_ - -#ifdef __KERNEL__ - -#include <linux/tipc.h> -#include <linux/skbuff.h> -#include <net/tipc/tipc_msg.h> - -#define TIPC_FLOW_CONTROL_WIN 512 - -/** - * struct tipc_port - native TIPC port info available to privileged users - * @usr_handle: pointer to additional user-defined information about port - * @lock: pointer to spinlock for controlling access to port - * @connected: non-zero if port is currently connected to a peer port - * @conn_type: TIPC type used when connection was established - * @conn_instance: TIPC instance used when connection was established - * @conn_unacked: number of unacknowledged messages received from peer port - * @published: non-zero if port has one or more associated names - * @congested: non-zero if cannot send because of link or port congestion - * @max_pkt: maximum packet size "hint" used when building messages sent by port - * @ref: unique reference to port in TIPC object registry - * @phdr: preformatted message header used when sending messages - */ - -struct tipc_port { - void *usr_handle; - spinlock_t *lock; - int connected; - u32 conn_type; - u32 conn_instance; - u32 conn_unacked; - int published; - u32 congested; - u32 max_pkt; - u32 ref; - struct tipc_msg phdr; -}; - - -/** - * tipc_createport_raw - create a native TIPC port and return it's reference - * - * Note: 'dispatcher' and 'wakeup' deliver a locked port. - */ - -u32 tipc_createport_raw(void *usr_handle, - u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), - void (*wakeup)(struct tipc_port *), - const u32 importance); - -/* - * tipc_set_msg_option(): port must be locked. 
- */ -int tipc_set_msg_option(struct tipc_port *tp_ptr, - const char *opt, - const u32 len); - -int tipc_reject_msg(struct sk_buff *buf, u32 err); - -int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode); - -void tipc_acknowledge(u32 port_ref,u32 ack); - -struct tipc_port *tipc_get_port(const u32 ref); - -void *tipc_get_handle(const u32 ref); - - -#endif - -#endif - diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 27394e0447d..b927413dde8 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -3,57 +3,62 @@ #include <net/checksum.h> -/* - * IPv6 transport protocols - */ - -#ifdef __KERNEL__ - +/* IPv6 transport protocols */ extern struct proto rawv6_prot; extern struct proto udpv6_prot; extern struct proto udplitev6_prot; extern struct proto tcpv6_prot; +extern struct proto pingv6_prot; -struct flowi; +struct flowi6; -/* extention headers */ -extern int ipv6_exthdrs_init(void); -extern void ipv6_exthdrs_exit(void); -extern int ipv6_frag_init(void); -extern void ipv6_frag_exit(void); +/* extension headers */ +int ipv6_exthdrs_init(void); +void ipv6_exthdrs_exit(void); +int ipv6_frag_init(void); +void ipv6_frag_exit(void); /* transport protocols */ -extern int rawv6_init(void); -extern void rawv6_exit(void); -extern int udpv6_init(void); -extern void udpv6_exit(void); -extern int udplitev6_init(void); -extern void udplitev6_exit(void); -extern int tcpv6_init(void); -extern void tcpv6_exit(void); +int pingv6_init(void); +void pingv6_exit(void); +int rawv6_init(void); +void rawv6_exit(void); +int udpv6_init(void); +void udpv6_exit(void); +int udplitev6_init(void); +void udplitev6_exit(void); +int tcpv6_init(void); +void tcpv6_exit(void); -extern int udpv6_connect(struct sock *sk, - struct sockaddr *uaddr, - int addr_len); +int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); -extern int datagram_recv_ctl(struct sock *sk, - struct msghdr *msg, - struct sk_buff *skb); +/* this does all the common and the specific ctl work */ +void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb); +void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb); +void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb); -extern int datagram_send_ctl(struct msghdr *msg, - struct flowi *fl, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass); +int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, + struct flowi6 *fl6, struct ipv6_txoptions *opt, + int *hlimit, int *tclass, int *dontfrag); -#define LOOPBACK4_IPV6 __constant_htonl(0x7f000006) +void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, + __u16 srcp, __u16 destp, int bucket); -/* - * address family specific functions - */ -extern struct inet_connection_sock_af_ops ipv4_specific; +#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -extern int inet6_destroy_sock(struct sock *sk); +/* address family specific functions */ +extern const struct inet_connection_sock_af_ops ipv4_specific; -#endif +void inet6_destroy_sock(struct sock *sk); + +#define IPV6_SEQ_DGRAM_HEADER \ + " sl " \ + "local_address " \ + "remote_address " \ + "st tx_queue rx_queue tr tm->when retrnsmt" \ + " uid timeout inode ref pointer drops\n" #endif diff --git a/include/net/tso.h b/include/net/tso.h new file mode 100644 index 00000000000..47e5444f7d1 --- /dev/null +++ b/include/net/tso.h @@ -0,0 +1,20 @@ +#ifndef _TSO_H +#define _TSO_H + +#include <net/ip.h> + +struct tso_t { + int 
next_frag_idx; + void *data; + size_t size; + u16 ip_id; + u32 tcp_seq; +}; + +int tso_count_descs(struct sk_buff *skb); +void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, + int size, bool is_last); +void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size); +void tso_start(struct sk_buff *skb, struct tso_t *tso); + +#endif /* _TSO_H */ diff --git a/include/net/udp.h b/include/net/udp.h index c6669c0a74c..68a1fefe3df 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -23,6 +23,7 @@ #define _UDP_H #include <linux/list.h> +#include <linux/bug.h> #include <net/inet_sock.h> #include <net/sock.h> #include <net/snmp.h> @@ -41,7 +42,7 @@ struct udp_skb_cb { union { struct inet_skb_parm h4; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; @@ -50,25 +51,56 @@ struct udp_skb_cb { }; #define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb)) -extern struct hlist_head udp_hash[UDP_HTABLE_SIZE]; -extern rwlock_t udp_hash_lock; - - -/* Note: this must match 'valbool' in sock_setsockopt */ -#define UDP_CSUM_NOXMIT 1 - -/* Used by SunRPC/xprt layer. */ -#define UDP_CSUM_NORCV 2 +/** + * struct udp_hslot - UDP hash slot + * + * @head: head of list of sockets + * @count: number of sockets in 'head' list + * @lock: spinlock protecting changes to head/count + */ +struct udp_hslot { + struct hlist_nulls_head head; + int count; + spinlock_t lock; +} __attribute__((aligned(2 * sizeof(long)))); -/* Default, as per the RFC, is to always do csums. */ -#define UDP_CSUM_DEFAULT 0 +/** + * struct udp_table - UDP table + * + * @hash: hash table, sockets are hashed on (local port) + * @hash2: hash table, sockets are hashed on (local port, local address) + * @mask: number of slots in hash tables, minus 1 + * @log: log2(number of slots in hash table) + */ +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot *hash2; + unsigned int mask; + unsigned int log; +}; +extern struct udp_table udp_table; +void udp_table_init(struct udp_table *, const char *); +static inline struct udp_hslot *udp_hashslot(struct udp_table *table, + struct net *net, unsigned int num) +{ + return &table->hash[udp_hashfn(net, num, table->mask)]; +} +/* + * For secondary hash, net_hash_mix() is performed before calling + * udp_hashslot2(), this explains difference with udp_hashslot() + */ +static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, + unsigned int hash) +{ + return &table->hash2[hash & table->mask]; +} extern struct proto udp_prot; -extern atomic_t udp_memory_allocated; +extern atomic_long_t udp_memory_allocated; /* sysctl variables for udp */ -extern int sysctl_udp_mem[3]; +extern long sysctl_udp_mem[3]; extern int sysctl_udp_rmem_min; extern int sysctl_udp_wmem_min; @@ -79,7 +111,9 @@ struct sk_buff; */ static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) { - return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov); + return (UDP_SKB_CB(skb)->cscov == skb->len ? 
+ __skb_checksum_complete(skb) : + __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov)); } static inline int udp_lib_checksum_complete(struct sk_buff *skb) @@ -104,107 +138,143 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) return csum; } +static inline __wsum udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} + +static inline __sum16 udp_v4_check(int len, __be32 saddr, + __be32 daddr, __wsum base) +{ + return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base); +} + +void udp_set_csum(bool nocheck, struct sk_buff *skb, + __be32 saddr, __be32 daddr, int len); + /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ static inline void udp_lib_hash(struct sock *sk) { BUG(); } -static inline void udp_lib_unhash(struct sock *sk) -{ - write_lock_bh(&udp_hash_lock); - if (sk_del_node_init(sk)) { - inet_sk(sk)->num = 0; - sock_prot_inuse_add(sk->sk_prot, -1); - } - write_unlock_bh(&udp_hash_lock); -} +void udp_lib_unhash(struct sock *sk); +void udp_lib_rehash(struct sock *sk, u16 new_hash); static inline void udp_lib_close(struct sock *sk, long timeout) { sk_common_release(sk); } +int udp_lib_get_port(struct sock *sk, unsigned short snum, + int (*)(const struct sock *, const struct sock *), + unsigned int hash2_nulladdr); /* net/ipv4/udp.c */ -extern int udp_get_port(struct sock *sk, unsigned short snum, - int (*saddr_cmp)(const struct sock *, const struct sock *)); -extern void udp_err(struct sk_buff *, u32); - -extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len); - -extern int udp_rcv(struct sk_buff *skb); -extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); -extern int udp_disconnect(struct sock *sk, int flags); -extern unsigned int udp_poll(struct file *file, struct socket *sock, - poll_table *wait); -extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, int optlen, - int (*push_pending_frames)(struct sock *)); - -DECLARE_SNMP_STAT(struct udp_mib, udp_statistics); -DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6); - -/* UDP-Lite does not have a standardized MIB yet, so we inherit from UDP */ -DECLARE_SNMP_STAT(struct udp_mib, udplite_statistics); -DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); +void udp_v4_early_demux(struct sk_buff *skb); +int udp_get_port(struct sock *sk, unsigned short snum, + int (*saddr_cmp)(const struct sock *, + const struct sock *)); +void udp_err(struct sk_buff *, u32); +int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len); +int udp_push_pending_frames(struct sock *sk); +void udp_flush_pending_frames(struct sock *sk); +void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); +int udp_rcv(struct sk_buff *skb); +int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int udp_disconnect(struct sock *sk, int flags); +unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); +struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features); +int udp_lib_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +int udp_lib_setsockopt(struct sock *sk, int level, int optname, + char 
__user *optval, unsigned int optlen, + int (*push_pending_frames)(struct sock *)); +struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, + __be32 daddr, __be16 dport, int dif); +struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, + __be32 daddr, __be16 dport, int dif, + struct udp_table *tbl); +struct sock *udp6_lib_lookup(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, __be16 dport, + int dif); +struct sock *__udp6_lib_lookup(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, __be16 dport, + int dif, struct udp_table *tbl); /* * SNMP statistics for UDP and UDP-Lite */ -#define UDP_INC_STATS_USER(field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_USER(udplite_statistics, field); \ - else SNMP_INC_STATS_USER(udp_statistics, field); } while(0) -#define UDP_INC_STATS_BH(field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_BH(udplite_statistics, field); \ - else SNMP_INC_STATS_BH(udp_statistics, field); } while(0) - -#define UDP6_INC_STATS_BH(field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_BH(udplite_stats_in6, field); \ - else SNMP_INC_STATS_BH(udp_stats_in6, field); } while(0) -#define UDP6_INC_STATS_USER(field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field); \ - else SNMP_INC_STATS_USER(udp_stats_in6, field); } while(0) - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -#define UDPX_INC_STATS_BH(sk, field) \ - do { \ - if ((sk)->sk_family == AF_INET) \ - UDP_INC_STATS_BH(field, 0); \ - else \ - UDP6_INC_STATS_BH(field, 0); \ - } while (0); +#define UDP_INC_STATS_USER(net, field, is_udplite) do { \ + if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \ + else SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0) +#define UDP_INC_STATS_BH(net, field, is_udplite) do { \ + if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \ + else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0) + +#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \ + if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\ + else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \ +} while(0) +#define UDP6_INC_STATS_USER(net, field, __lite) do { \ + if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field); \ + else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \ +} while(0) + +#if IS_ENABLED(CONFIG_IPV6) +#define UDPX_INC_STATS_BH(sk, field) \ +do { \ + if ((sk)->sk_family == AF_INET) \ + UDP_INC_STATS_BH(sock_net(sk), field, 0); \ + else \ + UDP6_INC_STATS_BH(sock_net(sk), field, 0); \ +} while (0) #else -#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(field, 0) +#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0) #endif /* /proc */ +int udp_seq_open(struct inode *inode, struct file *file); + struct udp_seq_afinfo { - struct module *owner; - char *name; - sa_family_t family; - struct hlist_head *hashtable; - int (*seq_show) (struct seq_file *m, void *v); - struct file_operations *seq_fops; + char *name; + sa_family_t family; + struct udp_table *udp_table; + const struct file_operations *seq_fops; + struct seq_operations seq_ops; }; struct udp_iter_state { + struct seq_net_private p; sa_family_t family; - struct hlist_head *hashtable; int bucket; - struct seq_operations seq_ops; + struct udp_table *udp_table; }; #ifdef CONFIG_PROC_FS -extern int udp_proc_register(struct 
udp_seq_afinfo *afinfo); -extern void udp_proc_unregister(struct udp_seq_afinfo *afinfo); +int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo); +void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo); -extern int udp4_proc_init(void); -extern void udp4_proc_exit(void); +int udp4_proc_init(void); +void udp4_proc_exit(void); #endif -extern void udp_init(void); +int udpv4_offload_init(void); + +void udp_init(void); + +void udp_encap_enable(void); +#if IS_ENABLED(CONFIG_IPV6) +void udpv6_encap_enable(void); +#endif #endif /* _UDP_H */ diff --git a/include/net/udplite.h b/include/net/udplite.h index b76b2e377af..2caadabcd07 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -11,7 +11,7 @@ #define UDPLITE_RECV_CSCOV 11 /* receiver partial coverage (threshold ) */ extern struct proto udplite_prot; -extern struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; +extern struct udp_table udplite_table; /* * Checksum computation is all in software, hence simpler getfrag. @@ -40,7 +40,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets * with a zero checksum field are illegal. */ if (uh->check == 0) { - LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: zeroed checksum field\n"); + LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n"); return 1; } @@ -52,7 +52,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) /* * Coverage length violates RFC 3828: log and discard silently. */ - LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: bad csum coverage %d/%d\n", + LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n", cscov, skb->len); return 1; @@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) return 0; } -static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) +/* Slow-path computation of checksum. Socket is locked. */ +static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) { + const struct udp_sock *up = udp_sk(skb->sk); int cscov = up->len; + __wsum csum = 0; - /* - * Sender has set `partial coverage' option on UDP-Lite socket - */ - if (up->pcflag & UDPLITE_SEND_CC) { + if (up->pcflag & UDPLITE_SEND_CC) { + /* + * Sender has set `partial coverage' option on UDP-Lite socket. + * The special case "up->pcslen == 0" signifies full coverage. + */ if (up->pcslen < up->len) { - /* up->pcslen == 0 means that full coverage is required, - * partial coverage only if 0 < up->pcslen < up->len */ - if (0 < up->pcslen) { - cscov = up->pcslen; - } - uh->len = htons(up->pcslen); + if (0 < up->pcslen) + cscov = up->pcslen; + udp_hdr(skb)->len = htons(up->pcslen); } - /* - * NOTE: Causes for the error case `up->pcslen > up->len': - * (i) Application error (will not be penalized). - * (ii) Payload too big for send buffer: data is split - * into several packets, each with its own header. - * In this case (e.g. last segment), coverage may - * exceed packet length. - * Since packets with coverage length > packet length are - * illegal, we fall back to the defaults here. - */ + /* + * NOTE: Causes for the error case `up->pcslen > up->len': + * (i) Application error (will not be penalized). + * (ii) Payload too big for send buffer: data is split + * into several packets, each with its own header. + * In this case (e.g. last segment), coverage may + * exceed packet length. 
+ * Since packets with coverage length > packet length are + * illegal, we fall back to the defaults here. + */ } - return cscov; -} - -static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) -{ - int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); - __wsum csum = 0; skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ @@ -115,7 +109,24 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) return csum; } -extern void udplite4_register(void); -extern int udplite_get_port(struct sock *sk, unsigned short snum, - int (*scmp)(const struct sock *, const struct sock *)); +/* Fast-path computation of checksum. Socket may not be locked. */ +static inline __wsum udplite_csum(struct sk_buff *skb) +{ + const struct udp_sock *up = udp_sk(skb->sk); + const int off = skb_transport_offset(skb); + int len = skb->len - off; + + if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) { + if (0 < up->pcslen) + len = up->pcslen; + udp_hdr(skb)->len = htons(up->pcslen); + } + skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ + + return skb_checksum(skb, off, len, 0); +} + +void udplite4_register(void); +int udplite_get_port(struct sock *sk, unsigned short snum, + int (*scmp)(const struct sock *, const struct sock *)); #endif /* _UDPLITE_H */ diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h new file mode 100644 index 00000000000..9ccd5316eac --- /dev/null +++ b/include/net/vsock_addr.h @@ -0,0 +1,30 @@ +/* + * VMware vSockets Driver + * + * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _VSOCK_ADDR_H_ +#define _VSOCK_ADDR_H_ + +#include <linux/vm_sockets.h> + +void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port); +int vsock_addr_validate(const struct sockaddr_vm *addr); +bool vsock_addr_bound(const struct sockaddr_vm *addr); +void vsock_addr_unbind(struct sockaddr_vm *addr); +bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, + const struct sockaddr_vm *other); +int vsock_addr_cast(const struct sockaddr *addr, size_t len, + struct sockaddr_vm **out_addr); + +#endif diff --git a/include/net/vxlan.h b/include/net/vxlan.h new file mode 100644 index 00000000000..12196ce661d --- /dev/null +++ b/include/net/vxlan.h @@ -0,0 +1,62 @@ +#ifndef __NET_VXLAN_H +#define __NET_VXLAN_H 1 + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/udp.h> + +#define VNI_HASH_BITS 10 +#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) + +struct vxlan_sock; +typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); + +/* per UDP socket information */ +struct vxlan_sock { + struct hlist_node hlist; + vxlan_rcv_t *rcv; + void *data; + struct work_struct del_work; + struct socket *sock; + struct rcu_head rcu; + struct hlist_head vni_list[VNI_HASH_SIZE]; + atomic_t refcnt; + struct udp_offload udp_offloads; +}; + +#define VXLAN_F_LEARN 0x01 +#define VXLAN_F_PROXY 0x02 +#define VXLAN_F_RSC 0x04 +#define VXLAN_F_L2MISS 0x08 +#define VXLAN_F_L3MISS 0x10 +#define VXLAN_F_IPV6 0x20 +#define VXLAN_F_UDP_CSUM 0x40 +#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80 +#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100 + +struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + vxlan_rcv_t *rcv, void *data, + bool no_share, u32 flags); + +void vxlan_sock_release(struct vxlan_sock *vs); + +int vxlan_xmit_skb(struct vxlan_sock *vs, + struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, + __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); + +__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb); + +/* IP header + UDP + VXLAN + Ethernet header */ +#define VXLAN_HEADROOM (20 + 8 + 8 + 14) +/* IPv6 header + UDP + VXLAN + Ethernet header */ +#define VXLAN6_HEADROOM (40 + 8 + 8 + 14) + +#if IS_ENABLED(CONFIG_VXLAN) +void vxlan_get_rx_port(struct net_device *netdev); +#else +static inline void vxlan_get_rx_port(struct net_device *netdev) +{ +} +#endif +#endif diff --git a/include/net/wext.h b/include/net/wext.h index 80b31d826b7..345911965db 100644 --- a/include/net/wext.h +++ b/include/net/wext.h @@ -1,17 +1,34 @@ #ifndef __NET_WEXT_H #define __NET_WEXT_H -/* - * wireless extensions interface to the core code - */ +#include <net/iw_handler.h> struct net; -#ifdef CONFIG_WIRELESS_EXT -extern int wext_proc_init(struct net *net); -extern void wext_proc_exit(struct net *net); -extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, - void __user *arg); +#ifdef CONFIG_WEXT_CORE +int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, + void __user *arg); +int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, + unsigned long arg); + +struct iw_statistics *get_wireless_stats(struct net_device *dev); +int call_commit_handler(struct net_device *dev); +#else +static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, + void __user *arg) +{ + return -EINVAL; +} +static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, + unsigned long arg) +{ + return -EINVAL; +} +#endif + +#ifdef 
CONFIG_WEXT_PROC +int wext_proc_init(struct net *net); +void wext_proc_exit(struct net *net); #else static inline int wext_proc_init(struct net *net) { @@ -21,11 +38,23 @@ static inline void wext_proc_exit(struct net *net) { return; } -static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, - void __user *arg) -{ - return -EINVAL; -} #endif +#ifdef CONFIG_WEXT_PRIV +int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, + unsigned int cmd, struct iw_request_info *info, + iw_handler handler); +int compat_private_call(struct net_device *dev, struct iwreq *iwr, + unsigned int cmd, struct iw_request_info *info, + iw_handler handler); +int iw_handler_get_private(struct net_device * dev, + struct iw_request_info * info, + union iwreq_data * wrqu, + char * extra); +#else +#define ioctl_private_call NULL +#define compat_private_call NULL +#endif + + #endif /* __NET_WEXT_H */ diff --git a/include/net/wimax.h b/include/net/wimax.h new file mode 100644 index 00000000000..e52ef5357e0 --- /dev/null +++ b/include/net/wimax.h @@ -0,0 +1,518 @@ +/* + * Linux WiMAX + * Kernel space API for accessing WiMAX devices + * + * + * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * The WiMAX stack provides an API for controlling and managing the + * system's WiMAX devices. This API affects the control plane; the + * data plane is accessed via the network stack (netdev). + * + * Parts of the WiMAX stack API and notifications are exported to + * user space via Generic Netlink. In user space, libwimax (part of + * the wimax-tools package) provides a shim layer for accessing those + * calls. + * + * The API is standardized for all WiMAX devices and different drivers + * implement the backend support for it. However, device-specific + * messaging pipes are provided that can be used to issue commands and + * receive notifications in free form. + * + * Currently the messaging pipes are the only means of control as it + * is not known (due to the lack of more devices in the market) what + * will be a good abstraction layer. Expect this to change as more + * devices show in the market. This API is designed to be growable in + * order to address this problem. + * + * USAGE + * + * Embed a `struct wimax_dev` at the beginning of the device's + * private structure, initialize and register it. For details, see + * `struct wimax_dev`'s documentation. + * + * Once this is done, wimax-tools's libwimaxll can be used to + * communicate with the driver from user space. Your user space + * application does not have to forcibly use libwimaxll and can talk + * the generic netlink protocol directly if desired. + * + * Remember this is a very low level API that will provide all of + * WiMAX features. 
Other daemons and services running in user space + * are the expected clients of it. They offer a higher level API that + * applications should use (an example of this is Intel's WiMAX + * Network Service for the i2400m). + * + * DESIGN + * + * Although not set in stone, this very basic interface is + * mostly completed. Remember this is meant to grow as new common + * operations are decided upon. New operations will be added to the + * interface, the intent being to keep backwards compatibility as much + * as possible. + * + * This layer implements a set of calls to control a WiMAX device, + * exposing a frontend to the rest of the kernel and user space (via + * generic netlink) and a backend implementation in the driver through + * function pointers. + * + * WiMAX devices have a state, and a kernel-only API allows the + * drivers to manipulate that state. State transitions are atomic, and + * only some of them are allowed (see `enum wimax_st`). + * + * Most API calls will set the state automatically; in most cases + * drivers have to only report state changes due to external + * conditions. + * + * All API operations are 'atomic', serialized through a mutex in the + * `struct wimax_dev`. + * + * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK + * + * The API is exported to user space using generic netlink (other + * methods can be added as needed). + * + * There is a Generic Netlink Family named "WiMAX", where interfaces + * supporting the WiMAX interface receive commands and broadcast their + * signals over a multicast group named "msg". + * + * Mapping to the source/destination interface is done by an interface + * index attribute. + * + * For user-to-kernel traffic (commands) we use a function call + * marshalling mechanism, where a message X with attributes A, B, C + * sent from user space to kernel space means executing the WiMAX API + * call wimax_X(A, B, C), sending the results back as a message. + * + * Kernel-to-user (notifications or signals) communication is sent + * over multicast groups. This allows multiple applications + * to monitor them. + * + * Each command/signal gets assigned its own attribute policy. This + * way the validator will verify that all the attributes in there are + * only the ones that should be for each command/signal. Think of an + * attribute mapping to a type+argument name for each command/signal. + * + * If we had a single policy for *all* commands/signals, after running + * the validator we'd have to check "does this attribute belong in + * here?" for each one. It can be done manually, but it's just easier + * to have the validator do that job with multiple policies. As well, + * it makes it easier to later expand each command/signal signature + * without affecting others and keeping the namespace more or less + * sane. Not that it is too complicated, but it makes it even easier. + * + * No state information is maintained in the kernel for each user + * space connection (the connection is stateless). + * + * TESTING FOR THE INTERFACE AND VERSIONING + * + * If network interface X is a WiMAX device, there will be a Generic + * Netlink family named "WiMAX X" and the device will present a + * "wimax" directory in its network sysfs directory + * (/sys/class/net/DEVICE/wimax) [used by HAL]. + * + * The absence of any of these means the device does not support + * this WiMAX API. + * + * By querying the generic netlink controller, versioning information + * and the multicast groups available can be found. 
Applications using + * the interface can either rely on that or use the generic netlink + * controller to figure out which generic netlink commands/signals are + * supported. + * + * NOTE: this versioning is a last resort to avoid hard + * incompatibilities. It is the intention of the design of this + * stack not to introduce backward incompatible changes. + * + * The version code has to fit in one byte (restrictions imposed by + * generic netlink); we use `version / 10` for the major version and + * `version % 10` for the minor. This gives 9 minors for each major + * and 25 majors. + * + * The version change protocol is as follows: + * + * - Major version: needs to be increased if an existing message/API + * call is changed or removed. Doesn't need to be changed if a new + * message is added. + * + * - Minor version: needs to be increased if new messages/API calls are + * being added or some other consideration that doesn't impact the + * user-kernel interface too much (like some kind of bug fix) and + * that is kind of left up in the air to common sense. + * + * User space code should not try to work if the major version it was + * compiled for differs from what the kernel offers. As well, if the + * minor version of the kernel interface is lower than the one user + * space is expecting (the one it was compiled for), the kernel + * might be missing API calls; user space shall be ready to handle + * said condition. Use the generic netlink controller operations to + * find which ones are supported and which not. + * + * libwimaxll:wimaxll_open() takes care of checking versions. + * + * THE OPERATIONS: + * + * Each operation is defined in its own file (drivers/net/wimax/op-*.c) + * for clarity. The parts needed for an operation are: + * + * - a function pointer in `struct wimax_dev`: optional, as the + * operation might be implemented by the stack and not by the + * driver. + * + * All function pointers are named wimax_dev->op_*(), and drivers + * must implement them except where noted otherwise. + * + * - When exported to user space, a `struct nla_policy` to define the + * attributes of the generic netlink command and a `struct genl_ops` + * to define the operation. + * + * All the declarations for the operation codes (WIMAX_GNL_OP_<NAME>) + * and generic netlink attributes (WIMAX_GNL_<NAME>_*) are declared in + * include/linux/wimax.h; this file is intended to be cloned by user + * space to gain access to those declarations. + * + * A few caveats to remember: + * + * - Need to define attribute numbers starting at 1; otherwise it + * fails. + * + * - the `struct genl_family` requires a maximum attribute id; when + * defining the `struct nla_policy` for each message, it has to have + * an array size of WIMAX_GNL_ATTR_MAX+1. + * + * The op_*() function pointers will not be called if the wimax_dev is + * in a state <= %WIMAX_ST_UNINITIALIZED. The exception is: + * + * - op_reset: can be called at any time after wimax_dev_add() has + * been called. + * + * THE PIPE INTERFACE: + * + * This interface is kept intentionally simple. The driver can send + * and receive free-form messages to/from user space through a + * pipe. See drivers/net/wimax/op-msg.c for details. + * + * The kernel-to-user messages are sent with + * wimax_msg(). User-to-kernel messages are delivered via + * wimax_dev->op_msg_from_user(). 
+ * + * RFKILL: + * + * RFKILL support is built into the wimax_dev layer; the driver just + * needs to call wimax_report_rfkill_{hw,sw}() to inform of changes in + * the hardware or software RF kill switches. When the stack wants to + * turn the radio off, it will call wimax_dev->op_rfkill_sw_toggle(), + * which the driver implements. + * + * User space can set the software RF Kill switch by calling + * wimax_rfkill(). + * + * The code for now only supports devices that don't require polling; + * if the device needs to be polled, create a self-rearming delayed + * work struct for polling or look into adding polled support to the + * WiMAX stack. + * + * When initializing the hardware (_probe), after calling + * wimax_dev_add(), query the device for its RF Kill switches' status + * and feed it back to the WiMAX stack using + * wimax_report_rfkill_{hw,sw}(). If any switch is missing, always + * report it as ON. + * + * NOTE: the wimax stack uses an inverted terminology to that of the + * RFKILL subsystem: + * + * - ON: radio is ON, RFKILL is DISABLED or OFF. + * - OFF: radio is OFF, RFKILL is ENABLED or ON. + * + * MISCELLANEOUS OPS: + * + * wimax_reset() can be used to reset the device to power on state; by + * default it issues a warm reset that maintains the same device + * node. If that is not possible, it falls back to a cold reset + * (device reconnect). The driver implements the backend to this + * through wimax_dev->op_reset(). + */ + +#ifndef __NET__WIMAX_H__ +#define __NET__WIMAX_H__ + +#include <linux/wimax.h> +#include <net/genetlink.h> +#include <linux/netdevice.h> + +struct net_device; +struct genl_info; +struct wimax_dev; + +/** + * struct wimax_dev - Generic WiMAX device + * + * @net_dev: [fill] Pointer to the &struct net_device this WiMAX + * device implements. + * + * @op_msg_from_user: [fill] Driver-specific operation to + * handle a raw message from user space to the driver. The + * driver can send messages to user space using + * wimax_msg_to_user(). + * + * @op_rfkill_sw_toggle: [fill] Driver-specific operation to act on + * userspace (or any other agent) requesting the WiMAX device to + * change the RF Kill software switch (WIMAX_RF_ON or + * WIMAX_RF_OFF). + * If such hardware support is not present, it is assumed the + * radio cannot be switched off and it is always on (and the stack + * will error out when trying to switch it off). In such case, + * this function pointer can be left as NULL. + * + * @op_reset: [fill] Driver-specific operation to reset the + * device. + * This operation should always attempt first a warm reset that + * does not disconnect the device from the bus and return 0. + * If that fails, it should resort to some sort of cold or bus + * reset (even if it implies a bus disconnection and device + * disappearance). In that case, -ENODEV should be returned to + * indicate the device is gone. + * This operation has to be synchronous, and return only when the + * reset is complete. In case of having had to resort to bus/cold + * reset implying a device disconnection, the call is allowed to + * return immediately. + * NOTE: wimax_dev->mutex is NOT locked when this op is being + * called; however, wimax_dev->mutex_reset IS locked to ensure + * serialization of calls to wimax_reset(). + * See wimax_reset()'s documentation. + * + * @name: [fill] A way to identify this device. We need to register a + * name with many subsystems (rfkill, workqueue creation, etc). 
+ * We can't use the network device name as that + * might change and in some instances we don't know it yet (until + * we don't call register_netdev()). So we generate an unique one + * using the driver name and device bus id, place it here and use + * it across the board. Recommended naming: + * DRIVERNAME-BUSNAME:BUSID (dev->bus->name, dev->bus_id). + * + * @id_table_node: [private] link to the list of wimax devices kept by + * id-table.c. Protected by it's own spinlock. + * + * @mutex: [private] Serializes all concurrent access and execution of + * operations. + * + * @mutex_reset: [private] Serializes reset operations. Needs to be a + * different mutex because as part of the reset operation, the + * driver has to call back into the stack to do things such as + * state change, that require wimax_dev->mutex. + * + * @state: [private] Current state of the WiMAX device. + * + * @rfkill: [private] integration into the RF-Kill infrastructure. + * + * @rf_sw: [private] State of the software radio switch (OFF/ON) + * + * @rf_hw: [private] State of the hardware radio switch (OFF/ON) + * + * @debugfs_dentry: [private] Used to hook up a debugfs entry. This + * shows up in the debugfs root as wimax\:DEVICENAME. + * + * Description: + * This structure defines a common interface to access all WiMAX + * devices from different vendors and provides a common API as well as + * a free-form device-specific messaging channel. + * + * Usage: + * 1. Embed a &struct wimax_dev at *the beginning* the network + * device structure so that netdev_priv() points to it. + * + * 2. memset() it to zero + * + * 3. Initialize with wimax_dev_init(). This will leave the WiMAX + * device in the %__WIMAX_ST_NULL state. + * + * 4. Fill all the fields marked with [fill]; once called + * wimax_dev_add(), those fields CANNOT be modified. + * + * 5. Call wimax_dev_add() *after* registering the network + * device. This will leave the WiMAX device in the %WIMAX_ST_DOWN + * state. + * Protect the driver's net_device->open() against succeeding if + * the wimax device state is lower than %WIMAX_ST_DOWN. + * + * 6. Select when the device is going to be turned on/initialized; + * for example, it could be initialized on 'ifconfig up' (when the + * netdev op 'open()' is called on the driver). + * + * When the device is initialized (at `ifconfig up` time, or right + * after calling wimax_dev_add() from _probe(), make sure the + * following steps are taken + * + * a. Move the device to %WIMAX_ST_UNINITIALIZED. This is needed so + * some API calls that shouldn't work until the device is ready + * can be blocked. + * + * b. Initialize the device. Make sure to turn the SW radio switch + * off and move the device to state %WIMAX_ST_RADIO_OFF when + * done. When just initialized, a device should be left in RADIO + * OFF state until user space devices to turn it on. + * + * c. Query the device for the state of the hardware rfkill switch + * and call wimax_rfkill_report_hw() and wimax_rfkill_report_sw() + * as needed. See below. + * + * wimax_dev_rm() undoes before unregistering the network device. Once + * wimax_dev_add() is called, the driver can get called on the + * wimax_dev->op_* function pointers + * + * CONCURRENCY: + * + * The stack provides a mutex for each device that will disallow API + * calls happening concurrently; thus, op calls into the driver + * through the wimax_dev->op*() function pointers will always be + * serialized and *never* concurrent. 
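The six usage steps above condense into a probe-time sequence along the following lines. This is a sketch under stated assumptions, not a buildable driver: the foo_* structure, callbacks and helper are hypothetical, and allocation of the net_device (with sizeof(struct foo_priv) bytes of private space) is assumed to happen elsewhere:

/* Step 1: embed the wimax_dev as the *first* member, so that
 * netdev_priv(net_dev) points at it. */
struct foo_priv {
	struct wimax_dev wimax_dev;
	/* ... driver-specific state ... */
};

static int foo_wimax_setup(struct net_device *net_dev)
{
	struct foo_priv *priv = netdev_priv(net_dev);
	struct wimax_dev *wimax_dev = &priv->wimax_dev;
	int ret;

	memset(priv, 0, sizeof(*priv));			/* step 2 */
	wimax_dev_init(wimax_dev);			/* step 3: __WIMAX_ST_NULL */

	/* Step 4: fill the [fill] fields; they are frozen once
	 * wimax_dev_add() has been called.  The op_* callbacks are
	 * hypothetical driver functions. */
	wimax_dev->net_dev = net_dev;
	wimax_dev->op_msg_from_user = foo_op_msg_from_user;
	wimax_dev->op_rfkill_sw_toggle = foo_op_rfkill_sw_toggle;
	wimax_dev->op_reset = foo_op_reset;
	snprintf(wimax_dev->name, sizeof(wimax_dev->name),
		 "foo-%s", dev_name(net_dev->dev.parent));

	ret = register_netdev(net_dev);			/* step 5: netdev first... */
	if (ret)
		return ret;
	ret = wimax_dev_add(wimax_dev, net_dev);	/* ...then WIMAX_ST_DOWN */
	if (ret) {
		unregister_netdev(net_dev);
		return ret;
	}

	/* Step 6c: report the real switch state; a missing switch is
	 * reported as ON.  The values here are examples only. */
	wimax_report_rfkill_hw(wimax_dev, WIMAX_RF_ON);
	wimax_report_rfkill_sw(wimax_dev, WIMAX_RF_OFF);
	return 0;
}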
+ * + * For locking, take wimax_dev->mutex is taken; (most) operations in + * the API have to check for wimax_dev_is_ready() to return 0 before + * continuing (this is done internally). + * + * REFERENCE COUNTING: + * + * The WiMAX device is reference counted by the associated network + * device. The only operation that can be used to reference the device + * is wimax_dev_get_by_genl_info(), and the reference it acquires has + * to be released with dev_put(wimax_dev->net_dev). + * + * RFKILL: + * + * At startup, both HW and SW radio switchess are assumed to be off. + * + * At initialization time [after calling wimax_dev_add()], have the + * driver query the device for the status of the software and hardware + * RF kill switches and call wimax_report_rfkill_hw() and + * wimax_rfkill_report_sw() to indicate their state. If any is + * missing, just call it to indicate it is ON (radio always on). + * + * Whenever the driver detects a change in the state of the RF kill + * switches, it should call wimax_report_rfkill_hw() or + * wimax_report_rfkill_sw() to report it to the stack. + */ +struct wimax_dev { + struct net_device *net_dev; + struct list_head id_table_node; + struct mutex mutex; /* Protects all members and API calls */ + struct mutex mutex_reset; + enum wimax_st state; + + int (*op_msg_from_user)(struct wimax_dev *wimax_dev, + const char *, + const void *, size_t, + const struct genl_info *info); + int (*op_rfkill_sw_toggle)(struct wimax_dev *wimax_dev, + enum wimax_rf_state); + int (*op_reset)(struct wimax_dev *wimax_dev); + + struct rfkill *rfkill; + unsigned int rf_hw; + unsigned int rf_sw; + char name[32]; + + struct dentry *debugfs_dentry; +}; + + + +/* + * WiMAX stack public API for device drivers + * ----------------------------------------- + * + * These functions are not exported to user space. + */ +void wimax_dev_init(struct wimax_dev *); +int wimax_dev_add(struct wimax_dev *, struct net_device *); +void wimax_dev_rm(struct wimax_dev *); + +static inline +struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev) +{ + return netdev_priv(net_dev); +} + +static inline +struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev) +{ + return wimax_dev->net_dev->dev.parent; +} + +void wimax_state_change(struct wimax_dev *, enum wimax_st); +enum wimax_st wimax_state_get(struct wimax_dev *); + +/* + * Radio Switch state reporting. + * + * enum wimax_rf_state is declared in linux/wimax.h so the exports + * to user space can use it. + */ +void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state); +void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state); + + +/* + * Free-form messaging to/from user space + * + * Sending a message: + * + * wimax_msg(wimax_dev, pipe_name, buf, buf_size, GFP_KERNEL); + * + * Broken up: + * + * skb = wimax_msg_alloc(wimax_dev, pipe_name, buf_size, GFP_KERNEL); + * ...fill up skb... + * wimax_msg_send(wimax_dev, pipe_name, skb); + * + * Be sure not to modify skb->data in the middle (ie: don't use + * skb_push()/skb_pull()/skb_reserve() on the skb). + * + * "pipe_name" is any string, that can be interpreted as the name of + * the pipe or recipient; the interpretation of it is driver + * specific, so the recipient can multiplex it as wished. It can be + * NULL, it won't be used - an example is using a "diagnostics" tag to + * send diagnostics information that a device-specific diagnostics + * tool would be interested in. 
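As a concrete (hypothetical) use of the one-shot form above, a driver could publish diagnostics data on a "diagnostics" pipe with a one-line wrapper; foo_send_diagnostics() is an invented name used only for illustration:

/* Push a driver-defined blob to user space on the "diagnostics" pipe,
 * using the single-call wimax_msg() form documented above. */
static int foo_send_diagnostics(struct wimax_dev *wimax_dev,
				const void *blob, size_t blob_len)
{
	return wimax_msg(wimax_dev, "diagnostics", blob, blob_len, GFP_KERNEL);
}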
+ */ +struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *, + size_t, gfp_t); +int wimax_msg_send(struct wimax_dev *, struct sk_buff *); +int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t); + +const void *wimax_msg_data_len(struct sk_buff *, size_t *); +const void *wimax_msg_data(struct sk_buff *); +ssize_t wimax_msg_len(struct sk_buff *); + + +/* + * WiMAX stack user space API + * -------------------------- + * + * This API is what gets exported to user space for general + * operations. As well, they can be called from within the kernel, + * (with a properly referenced `struct wimax_dev`). + * + * Properly referenced means: the 'struct net_device' that embeds the + * device's control structure and (as such) the 'struct wimax_dev' is + * referenced by the caller. + */ +int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state); +int wimax_reset(struct wimax_dev *); + +#endif /* #ifndef __NET__WIMAX_H__ */ diff --git a/include/net/wireless.h b/include/net/wireless.h deleted file mode 100644 index d30c4ba8fd9..00000000000 --- a/include/net/wireless.h +++ /dev/null @@ -1,139 +0,0 @@ -#ifndef __NET_WIRELESS_H -#define __NET_WIRELESS_H - -/* - * 802.11 device management - * - * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> - */ - -#include <linux/netdevice.h> -#include <linux/debugfs.h> -#include <linux/list.h> -#include <net/cfg80211.h> - -/** - * struct wiphy - wireless hardware description - * @idx: the wiphy index assigned to this item - * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name> - */ -struct wiphy { - /* assign these fields before you register the wiphy */ - - /* permanent MAC address */ - u8 perm_addr[ETH_ALEN]; - - /* If multiple wiphys are registered and you're handed e.g. - * a regular netdev with assigned ieee80211_ptr, you won't - * know whether it points to a wiphy your driver has registered - * or not. Assign this to something global to your driver to - * help determine whether you own this wiphy or not. */ - void *privid; - - /* fields below are read-only, assigned by cfg80211 */ - - /* the item in /sys/class/ieee80211/ points to this, - * you need use set_wiphy_dev() (see below) */ - struct device dev; - - /* dir in debugfs: ieee80211/<wiphyname> */ - struct dentry *debugfsdir; - - char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); -}; - -/** struct wireless_dev - wireless per-netdev state - * - * This structure must be allocated by the driver/stack - * that uses the ieee80211_ptr field in struct net_device - * (this is intentional so it can be allocated along with - * the netdev.) 
- * - * @wiphy: pointer to hardware description - */ -struct wireless_dev { - struct wiphy *wiphy; - - /* private to the generic wireless code */ - struct list_head list; - struct net_device *netdev; -}; - -/** - * wiphy_priv - return priv from wiphy - */ -static inline void *wiphy_priv(struct wiphy *wiphy) -{ - BUG_ON(!wiphy); - return &wiphy->priv; -} - -/** - * set_wiphy_dev - set device pointer for wiphy - */ -static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev) -{ - wiphy->dev.parent = dev; -} - -/** - * wiphy_dev - get wiphy dev pointer - */ -static inline struct device *wiphy_dev(struct wiphy *wiphy) -{ - return wiphy->dev.parent; -} - -/** - * wiphy_name - get wiphy name - */ -static inline char *wiphy_name(struct wiphy *wiphy) -{ - return wiphy->dev.bus_id; -} - -/** - * wdev_priv - return wiphy priv from wireless_dev - */ -static inline void *wdev_priv(struct wireless_dev *wdev) -{ - BUG_ON(!wdev); - return wiphy_priv(wdev->wiphy); -} - -/** - * wiphy_new - create a new wiphy for use with cfg80211 - * - * create a new wiphy and associate the given operations with it. - * @sizeof_priv bytes are allocated for private use. - * - * the returned pointer must be assigned to each netdev's - * ieee80211_ptr for proper operation. - */ -struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv); - -/** - * wiphy_register - register a wiphy with cfg80211 - * - * register the given wiphy - * - * Returns a non-negative wiphy index or a negative error code. - */ -extern int wiphy_register(struct wiphy *wiphy); - -/** - * wiphy_unregister - deregister a wiphy from cfg80211 - * - * unregister a device with the given priv pointer. - * After this call, no more requests can be made with this priv - * pointer, but the call may sleep to wait for an outstanding - * request that is being handled. - */ -extern void wiphy_unregister(struct wiphy *wiphy); - -/** - * wiphy_free - free wiphy - */ -extern void wiphy_free(struct wiphy *wiphy); - -#endif /* __NET_WIRELESS_H */ diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h new file mode 100644 index 00000000000..10ab0fc6d4f --- /dev/null +++ b/include/net/wpan-phy.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + */ + +#ifndef WPAN_PHY_H +#define WPAN_PHY_H + +#include <linux/netdevice.h> +#include <linux/mutex.h> +#include <linux/bug.h> + +/* According to the IEEE 802.15.4 stadard the upper most significant bits of + * the 32-bit channel bitmaps shall be used as an integer value to specify 32 + * possible channel pages. The lower 27 bits of the channel bit map shall be + * used as a bit mask to specify channel numbers within a channel page. 
+ */ +#define WPAN_NUM_CHANNELS 27 +#define WPAN_NUM_PAGES 32 + +struct wpan_phy { + struct mutex pib_lock; + + /* + * This is a PIB according to 802.15.4-2011. + * We do not provide timing-related variables, as they + * aren't used outside of driver + */ + u8 current_channel; + u8 current_page; + u32 channels_supported[32]; + s8 transmit_power; + u8 cca_mode; + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + + bool lbt; + s32 cca_ed_level; + + struct device dev; + int idx; + + struct net_device *(*add_iface)(struct wpan_phy *phy, + const char *name, int type); + void (*del_iface)(struct wpan_phy *phy, struct net_device *dev); + + int (*set_txpower)(struct wpan_phy *phy, int db); + int (*set_lbt)(struct wpan_phy *phy, bool on); + int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode); + int (*set_cca_ed_level)(struct wpan_phy *phy, int level); + int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be, + u8 retries); + int (*set_frame_retries)(struct wpan_phy *phy, s8 retries); + + char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); +}; + +#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) + +struct wpan_phy *wpan_phy_alloc(size_t priv_size); +static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) +{ + phy->dev.parent = dev; +} +int wpan_phy_register(struct wpan_phy *phy); +void wpan_phy_unregister(struct wpan_phy *phy); +void wpan_phy_free(struct wpan_phy *phy); +/* Same semantics as for class_for_each_device */ +int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); + +static inline void *wpan_phy_priv(struct wpan_phy *phy) +{ + BUG_ON(!phy); + return &phy->priv; +} + +struct wpan_phy *wpan_phy_find(const char *str); + +static inline void wpan_phy_put(struct wpan_phy *phy) +{ + put_device(&phy->dev); +} + +static inline const char *wpan_phy_name(struct wpan_phy *phy) +{ + return dev_name(&phy->dev); +} +#endif diff --git a/include/net/x25.h b/include/net/x25.h index fc3f03d976f..c383aa4edbf 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -10,6 +10,7 @@ #ifndef _X25_H #define _X25_H #include <linux/x25.h> +#include <linux/slab.h> #include <net/sock.h> #define X25_ADDR_LEN 16 @@ -79,8 +80,6 @@ enum { #define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */ #define X25_DEFAULT_THROUGHPUT 0x0A /* Deafult Throughput */ #define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */ -#define X25_DENY_ACCPT_APPRV 0x01 /* Default value */ -#define X25_ALLOW_ACCPT_APPRV 0x00 /* Control enabled */ #define X25_SMODULUS 8 #define X25_EMODULUS 128 @@ -112,6 +111,11 @@ enum { #define X25_MAX_AE_LEN 40 /* Max num of semi-octets in AE - OSI Nw */ #define X25_MAX_DTE_FACIL_LEN 21 /* Max length of DTE facility params */ +/* Bitset in x25_sock->flags for misc flags */ +#define X25_Q_BIT_FLAG 0 +#define X25_INTERRUPT_FLAG 1 +#define X25_ACCPT_APPRV_FLAG 2 + /** * struct x25_route - x25 routing entry * @node - entry in x25_list_lock @@ -145,10 +149,11 @@ struct x25_sock { struct x25_address source_addr, dest_addr; struct x25_neigh *neighbour; unsigned int lci, cudmatchlength; - unsigned char state, condition, qbitincl, intflag, accptapprv; + unsigned char state, condition; unsigned short vs, vr, va, vl; unsigned long t2, t21, t22, t23; unsigned short fraglen; + unsigned long flags; struct sk_buff_head ack_queue; struct sk_buff_head fragment_queue; struct sk_buff_head interrupt_in_queue; @@ -182,53 +187,57 @@ extern int sysctl_x25_clear_request_timeout; extern int sysctl_x25_ack_holdback_timeout; 
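To show how the wpan_phy API above fits together, here is a minimal allocation/registration sketch; it assumes a hypothetical "foo" radio and is not taken from any in-tree driver:

/* Per-PHY driver state, carved out of the priv area requested below. */
struct foo_phy_priv {
	int irq;	/* example field */
};

static struct wpan_phy *foo_phy_create(struct device *parent)
{
	struct wpan_phy *phy;

	phy = wpan_phy_alloc(sizeof(struct foo_phy_priv));
	if (!phy)
		return NULL;
	wpan_phy_set_dev(phy, parent);

	/* Advertise channels 11-26 on page 0 (the 2.4 GHz O-QPSK PHY):
	 * one bit per channel in the lower 27 bits of the page bitmap. */
	phy->channels_supported[0] = 0x07fff800;
	phy->current_page = 0;
	phy->current_channel = 11;

	if (wpan_phy_register(phy)) {
		wpan_phy_free(phy);
		return NULL;
	}
	/* wpan_phy_priv(phy) now returns the struct foo_phy_priv area. */
	return phy;
}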
extern int sysctl_x25_forward; -extern int x25_addr_ntoa(unsigned char *, struct x25_address *, - struct x25_address *); -extern int x25_addr_aton(unsigned char *, struct x25_address *, - struct x25_address *); -extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *); -extern void x25_destroy_socket(struct sock *); -extern int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int); -extern void x25_kill_by_neigh(struct x25_neigh *); +int x25_parse_address_block(struct sk_buff *skb, + struct x25_address *called_addr, + struct x25_address *calling_addr); + +int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *); +int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *); +struct sock *x25_find_socket(unsigned int, struct x25_neigh *); +void x25_destroy_socket_from_timer(struct sock *); +int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int); +void x25_kill_by_neigh(struct x25_neigh *); /* x25_dev.c */ -extern void x25_send_frame(struct sk_buff *, struct x25_neigh *); -extern int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); -extern void x25_establish_link(struct x25_neigh *); -extern void x25_terminate_link(struct x25_neigh *); +void x25_send_frame(struct sk_buff *, struct x25_neigh *); +int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); +void x25_establish_link(struct x25_neigh *); +void x25_terminate_link(struct x25_neigh *); /* x25_facilities.c */ -extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *, - struct x25_dte_facilities *, unsigned long *); -extern int x25_create_facilities(unsigned char *, struct x25_facilities *, - struct x25_dte_facilities *, unsigned long); -extern int x25_negotiate_facilities(struct sk_buff *, struct sock *, - struct x25_facilities *, - struct x25_dte_facilities *); -extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *); +int x25_parse_facilities(struct sk_buff *, struct x25_facilities *, + struct x25_dte_facilities *, unsigned long *); +int x25_create_facilities(unsigned char *, struct x25_facilities *, + struct x25_dte_facilities *, unsigned long); +int x25_negotiate_facilities(struct sk_buff *, struct sock *, + struct x25_facilities *, + struct x25_dte_facilities *); +void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *); /* x25_forward.c */ -extern void x25_clear_forward_by_lci(unsigned int lci); -extern void x25_clear_forward_by_dev(struct net_device *); -extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *); -extern int x25_forward_call(struct x25_address *, struct x25_neigh *, - struct sk_buff *, int); +void x25_clear_forward_by_lci(unsigned int lci); +void x25_clear_forward_by_dev(struct net_device *); +int x25_forward_data(int, struct x25_neigh *, struct sk_buff *); +int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *, + int); /* x25_in.c */ -extern int x25_process_rx_frame(struct sock *, struct sk_buff *); -extern int x25_backlog_rcv(struct sock *, struct sk_buff *); +int x25_process_rx_frame(struct sock *, struct sk_buff *); +int x25_backlog_rcv(struct sock *, struct sk_buff *); /* x25_link.c */ -extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short); -extern void x25_link_device_up(struct net_device *); -extern void x25_link_device_down(struct net_device *); -extern void x25_link_established(struct 
x25_neigh *); -extern void x25_link_terminated(struct x25_neigh *); -extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char); -extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *); -extern int x25_subscr_ioctl(unsigned int, void __user *); -extern struct x25_neigh *x25_get_neigh(struct net_device *); -extern void x25_link_free(void); +void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short); +void x25_link_device_up(struct net_device *); +void x25_link_device_down(struct net_device *); +void x25_link_established(struct x25_neigh *); +void x25_link_terminated(struct x25_neigh *); +void x25_transmit_clear_request(struct x25_neigh *, unsigned int, + unsigned char); +void x25_transmit_link(struct sk_buff *, struct x25_neigh *); +int x25_subscr_ioctl(unsigned int, void __user *); +struct x25_neigh *x25_get_neigh(struct net_device *); +void x25_link_free(void); /* x25_neigh.c */ static __inline__ void x25_neigh_hold(struct x25_neigh *nb) @@ -243,16 +252,16 @@ static __inline__ void x25_neigh_put(struct x25_neigh *nb) } /* x25_out.c */ -extern int x25_output(struct sock *, struct sk_buff *); -extern void x25_kick(struct sock *); -extern void x25_enquiry_response(struct sock *); +int x25_output(struct sock *, struct sk_buff *); +void x25_kick(struct sock *); +void x25_enquiry_response(struct sock *); /* x25_route.c */ -extern struct x25_route *x25_get_route(struct x25_address *addr); -extern struct net_device *x25_dev_get(char *); -extern void x25_route_device_down(struct net_device *dev); -extern int x25_route_ioctl(unsigned int, void __user *); -extern void x25_route_free(void); +struct x25_route *x25_get_route(struct x25_address *addr); +struct net_device *x25_dev_get(char *); +void x25_route_device_down(struct net_device *dev); +int x25_route_ioctl(unsigned int, void __user *); +void x25_route_free(void); static __inline__ void x25_route_hold(struct x25_route *rt) { @@ -266,31 +275,38 @@ static __inline__ void x25_route_put(struct x25_route *rt) } /* x25_subr.c */ -extern void x25_clear_queues(struct sock *); -extern void x25_frames_acked(struct sock *, unsigned short); -extern void x25_requeue_frames(struct sock *); -extern int x25_validate_nr(struct sock *, unsigned short); -extern void x25_write_internal(struct sock *, int); -extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *); -extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char); +void x25_clear_queues(struct sock *); +void x25_frames_acked(struct sock *, unsigned short); +void x25_requeue_frames(struct sock *); +int x25_validate_nr(struct sock *, unsigned short); +void x25_write_internal(struct sock *, int); +int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, + int *); +void x25_disconnect(struct sock *, int, unsigned char, unsigned char); /* x25_timer.c */ -extern void x25_init_timers(struct sock *sk); -extern void x25_start_heartbeat(struct sock *); -extern void x25_start_t2timer(struct sock *); -extern void x25_start_t21timer(struct sock *); -extern void x25_start_t22timer(struct sock *); -extern void x25_start_t23timer(struct sock *); -extern void x25_stop_heartbeat(struct sock *); -extern void x25_stop_timer(struct sock *); -extern unsigned long x25_display_timer(struct sock *); -extern void x25_check_rbuf(struct sock *); +void x25_init_timers(struct sock *sk); +void x25_start_heartbeat(struct sock *); +void x25_start_t2timer(struct sock *); +void x25_start_t21timer(struct sock *); 
+void x25_start_t22timer(struct sock *); +void x25_start_t23timer(struct sock *); +void x25_stop_heartbeat(struct sock *); +void x25_stop_timer(struct sock *); +unsigned long x25_display_timer(struct sock *); +void x25_check_rbuf(struct sock *); /* sysctl_net_x25.c */ -extern void x25_register_sysctl(void); -extern void x25_unregister_sysctl(void); +#ifdef CONFIG_SYSCTL +void x25_register_sysctl(void); +void x25_unregister_sysctl(void); +#else +static inline void x25_register_sysctl(void) {}; +static inline void x25_unregister_sysctl(void) {}; +#endif /* CONFIG_SYSCTL */ + struct x25_skb_cb { - unsigned flags; + unsigned int flags; }; #define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb)) @@ -300,7 +316,9 @@ extern struct list_head x25_route_list; extern rwlock_t x25_route_list_lock; extern struct list_head x25_forward_list; extern rwlock_t x25_forward_list_lock; +extern struct list_head x25_neigh_list; +extern rwlock_t x25_neigh_list_lock; -extern int x25_proc_init(void); -extern void x25_proc_exit(void); +int x25_proc_init(void); +void x25_proc_exit(void); #endif diff --git a/include/net/x25device.h b/include/net/x25device.h index 1415bcf9398..1fa08b49f1c 100644 --- a/include/net/x25device.h +++ b/include/net/x25device.h @@ -3,6 +3,7 @@ #include <linux/if_ether.h> #include <linux/if_packet.h> +#include <linux/if_x25.h> #include <linux/skbuff.h> static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 0d255ae008b..721e9c3b11b 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -12,6 +12,7 @@ #include <linux/in6.h> #include <linux/mutex.h> #include <linux/audit.h> +#include <linux/slab.h> #include <net/sock.h> #include <net/dst.h> @@ -19,6 +20,10 @@ #include <net/route.h> #include <net/ipv6.h> #include <net/ip6_fib.h> +#include <net/flow.h> + +#include <linux/interrupt.h> + #ifdef CONFIG_XFRM_STATISTICS #include <net/snmp.h> #endif @@ -31,6 +36,7 @@ #define XFRM_PROTO_ROUTING IPPROTO_ROUTING #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS +#define XFRM_ALIGN4(len) (((len) + 3) & ~3) #define XFRM_ALIGN8(len) (((len) + 7) & ~7) #define MODULE_ALIAS_XFRM_MODE(family, encap) \ MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap)) @@ -38,23 +44,15 @@ MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto)) #ifdef CONFIG_XFRM_STATISTICS -DECLARE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); -#define XFRM_INC_STATS(field) SNMP_INC_STATS(xfrm_statistics, field) -#define XFRM_INC_STATS_BH(field) SNMP_INC_STATS_BH(xfrm_statistics, field) -#define XFRM_INC_STATS_USER(field) SNMP_INC_STATS_USER(xfrm_statistics, field) +#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field) +#define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field) +#define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field) #else -#define XFRM_INC_STATS(field) -#define XFRM_INC_STATS_BH(field) -#define XFRM_INC_STATS_USER(field) +#define XFRM_INC_STATS(net, field) ((void)(net)) +#define XFRM_INC_STATS_BH(net, field) ((void)(net)) +#define XFRM_INC_STATS_USER(net, field) ((void)(net)) #endif -extern struct sock *xfrm_nl; -extern u32 sysctl_xfrm_aevent_etime; -extern u32 sysctl_xfrm_aevent_rseqth; -extern int sysctl_xfrm_larval_drop; -extern u32 sysctl_xfrm_acq_expires; - -extern struct mutex xfrm_cfg_mutex; /* Organization of SPD aka "XFRM rules" ------------------------------------ @@ -117,11 +115,24 @@ extern struct 
mutex xfrm_cfg_mutex; metrics. Plus, it will be made via sk->sk_dst_cache. Solved. */ +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + /* Full description of state of transformer. */ -struct xfrm_state -{ - /* Note: bydst is re-used during gc */ - struct hlist_node bydst; +struct xfrm_state { +#ifdef CONFIG_NET_NS + struct net *xs_net; +#endif + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; struct hlist_node bysrc; struct hlist_node byspi; @@ -130,15 +141,13 @@ struct xfrm_state struct xfrm_id id; struct xfrm_selector sel; + struct xfrm_mark mark; + u32 tfcpad; u32 genid; - /* Key manger bits */ - struct { - u8 state; - u8 dying; - u32 seq; - } km; + /* Key manager bits */ + struct xfrm_state_walk km; /* Parameters of this state. */ struct { @@ -151,12 +160,13 @@ struct xfrm_state xfrm_address_t saddr; int header_len; int trailer_len; + u32 extra_flags; } props; struct xfrm_lifetime_cfg lft; /* Data for transformer */ - struct xfrm_algo *aalg; + struct xfrm_algo_auth *aalg; struct xfrm_algo *ealg; struct xfrm_algo *calg; struct xfrm_algo_aead *aead; @@ -175,9 +185,14 @@ struct xfrm_state /* State for replay detection */ struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; /* Replay detection state at the time we sent the last notification */ struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + + /* The functions for replay detection. */ + struct xfrm_replay *repl; /* internal flag that only holds state for delayed aevent at the * moment @@ -195,7 +210,10 @@ struct xfrm_state struct xfrm_stats stats; struct xfrm_lifetime_cur curlft; - struct timer_list timer; + struct tasklet_hrtimer mtimer; + + /* used to fix curlft->add_time when changing date */ + long saved_tmo; /* Last used time */ unsigned long lastused; @@ -215,8 +233,14 @@ struct xfrm_state void *data; }; +static inline struct net *xs_net(struct xfrm_state *x) +{ + return read_pnet(&x->xs_net); +} + /* xflags - make enum if more show up */ #define XFRM_TIME_DEFER 1 +#define XFRM_SOFT_EXPIRE 2 enum { XFRM_STATE_VOID, @@ -228,8 +252,7 @@ enum { }; /* callback structure passed from either netlink or pfkey */ -struct km_event -{ +struct km_event { union { u32 hard; u32 proto; @@ -239,8 +262,21 @@ struct km_event } data; u32 seq; - u32 pid; + u32 portid; u32 event; + struct net *net; +}; + +struct xfrm_replay { + void (*advance)(struct xfrm_state *x, __be32 net_seq); + int (*check)(struct xfrm_state *x, + struct sk_buff *skb, + __be32 net_seq); + int (*recheck)(struct xfrm_state *x, + struct sk_buff *skb, + __be32 net_seq); + void (*notify)(struct xfrm_state *x, int event); + int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); }; struct net_device; @@ -249,31 +285,37 @@ struct xfrm_dst; struct xfrm_policy_afinfo { unsigned short family; struct dst_ops *dst_ops; - void (*garbage_collect)(void); - struct dst_entry *(*dst_lookup)(int tos, xfrm_address_t *saddr, - xfrm_address_t *daddr); - int (*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr); - struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy); + void (*garbage_collect)(struct net *net); + struct dst_entry *(*dst_lookup)(struct net *net, int tos, + const xfrm_address_t *saddr, + const xfrm_address_t *daddr); + int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr); void (*decode_session)(struct sk_buff *skb, struct flowi *fl, int reverse); - int 
(*get_tos)(struct flowi *fl); + int (*get_tos)(const struct flowi *fl); + void (*init_dst)(struct net *net, + struct xfrm_dst *dst); int (*init_path)(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len); int (*fill_dst)(struct xfrm_dst *xdst, - struct net_device *dev); + struct net_device *dev, + const struct flowi *fl); + struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig); }; -extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); -extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); -extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c); -extern void km_state_notify(struct xfrm_state *x, struct km_event *c); +int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); +int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +void km_policy_notify(struct xfrm_policy *xp, int dir, + const struct km_event *c); +void km_state_notify(struct xfrm_state *x, const struct km_event *c); struct xfrm_tmpl; -extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); -extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid); -extern int __xfrm_state_delete(struct xfrm_state *x); +int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, + struct xfrm_policy *pol); +void km_state_expired(struct xfrm_state *x, int hard, u32 portid); +int __xfrm_state_delete(struct xfrm_state *x); struct xfrm_state_afinfo { unsigned int family; @@ -283,31 +325,47 @@ struct xfrm_state_afinfo { const struct xfrm_type *type_map[IPPROTO_MAX]; struct xfrm_mode *mode_map[XFRM_MODE_MAX]; int (*init_flags)(struct xfrm_state *x); - void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, - struct xfrm_tmpl *tmpl, - xfrm_address_t *daddr, xfrm_address_t *saddr); + void (*init_tempsel)(struct xfrm_selector *sel, + const struct flowi *fl); + void (*init_temprop)(struct xfrm_state *x, + const struct xfrm_tmpl *tmpl, + const xfrm_address_t *daddr, + const xfrm_address_t *saddr); int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); - int (*output)(struct sk_buff *skb); + int (*output)(struct sock *sk, struct sk_buff *skb); + int (*output_finish)(struct sk_buff *skb); int (*extract_input)(struct xfrm_state *x, struct sk_buff *skb); int (*extract_output)(struct xfrm_state *x, struct sk_buff *skb); int (*transport_finish)(struct sk_buff *skb, int async); + void (*local_error)(struct sk_buff *skb, u32 mtu); +}; + +int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); +int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); +struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); +void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); + +struct xfrm_input_afinfo { + unsigned int family; + struct module *owner; + int (*callback)(struct sk_buff *skb, u8 protocol, + int err); }; -extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); -extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); +int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo); +int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo); -extern void xfrm_state_delete_tunnel(struct xfrm_state *x); +void xfrm_state_delete_tunnel(struct xfrm_state *x); -struct xfrm_type -{ +struct xfrm_type { char *description; struct module *owner; - __u8 proto; - __u8 flags; + u8 proto; + u8 flags; #define 
XFRM_TYPE_NON_FRAGMENT 1 #define XFRM_TYPE_REPLAY_PROT 2 #define XFRM_TYPE_LOCAL_COADDR 4 @@ -317,14 +375,15 @@ struct xfrm_type void (*destructor)(struct xfrm_state *); int (*input)(struct xfrm_state *, struct sk_buff *skb); int (*output)(struct xfrm_state *, struct sk_buff *pskb); - int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *); + int (*reject)(struct xfrm_state *, struct sk_buff *, + const struct flowi *); int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); /* Estimate maximal size of result of transformation of a dgram */ u32 (*get_mtu)(struct xfrm_state *, int size); }; -extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family); -extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); +int xfrm_register_type(const struct xfrm_type *type, unsigned short family); +int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); struct xfrm_mode { /* @@ -385,8 +444,8 @@ enum { XFRM_MODE_FLAG_TUNNEL = 1, }; -extern int xfrm_register_mode(struct xfrm_mode *mode, int family); -extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family); +int xfrm_register_mode(struct xfrm_mode *mode, int family); +int xfrm_unregister_mode(struct xfrm_mode *mode, int family); static inline int xfrm_af2proto(unsigned int family) { @@ -409,8 +468,7 @@ static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipp return x->inner_mode_iaf; } -struct xfrm_tmpl -{ +struct xfrm_tmpl { /* id in template is interpreted as: * daddr - destination of tunnel, may be zero for transport mode. * spi - zero to acquire spi. Not zero if spi is static, then @@ -424,28 +482,49 @@ struct xfrm_tmpl unsigned short encap_family; - __u32 reqid; + u32 reqid; /* Mode: transport, tunnel etc. */ - __u8 mode; + u8 mode; /* Sharing mode: unique, this session only, this user only etc. */ - __u8 share; + u8 share; /* May skip this transfomration if no SA is found */ - __u8 optional; + u8 optional; + +/* Skip aalgos/ealgos/calgos checks. 
*/ + u8 allalgs; /* Bit mask of algos allowed for acquisition */ - __u32 aalgos; - __u32 ealgos; - __u32 calgos; + u32 aalgos; + u32 ealgos; + u32 calgos; }; #define XFRM_MAX_DEPTH 6 -struct xfrm_policy -{ - struct xfrm_policy *next; +struct xfrm_policy_walk_entry { + struct list_head all; + u8 dead; +}; + +struct xfrm_policy_walk { + struct xfrm_policy_walk_entry walk; + u8 type; + u32 seq; +}; + +struct xfrm_policy_queue { + struct sk_buff_head hold_queue; + struct timer_list hold_timer; + unsigned long timeout; +}; + +struct xfrm_policy { +#ifdef CONFIG_NET_NS + struct net *xp_net; +#endif struct hlist_node bydst; struct hlist_node byidx; @@ -454,23 +533,37 @@ struct xfrm_policy atomic_t refcnt; struct timer_list timer; + struct flow_cache_object flo; + atomic_t genid; u32 priority; u32 index; + struct xfrm_mark mark; struct xfrm_selector selector; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; - struct dst_entry *bundles; - u16 family; + struct xfrm_policy_walk_entry walk; + struct xfrm_policy_queue polq; u8 type; u8 action; u8 flags; - u8 dead; u8 xfrm_nr; - /* XXX 1 byte hole, try to pack */ + u16 family; struct xfrm_sec_ctx *security; struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; }; +static inline struct net *xp_net(const struct xfrm_policy *xp) +{ + return read_pnet(&xp->xp_net); +} + +struct xfrm_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + u32 reserved; + u16 family; +}; + struct xfrm_migrate { xfrm_address_t old_daddr; xfrm_address_t old_saddr; @@ -485,10 +578,6 @@ struct xfrm_migrate { }; #define XFRM_KM_TIMEOUT 30 -/* which seqno */ -#define XFRM_REPLAY_SEQ 1 -#define XFRM_REPLAY_OSEQ 2 -#define XFRM_REPLAY_SEQ_MASK 3 /* what happened */ #define XFRM_REPLAY_UPDATE XFRM_AE_CR #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE @@ -500,23 +589,39 @@ struct xfrm_migrate { /* default seq threshold size */ #define XFRM_AE_SEQT_SIZE 2 -struct xfrm_mgr -{ +struct xfrm_mgr { struct list_head list; char *id; - int (*notify)(struct xfrm_state *x, struct km_event *c); - int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir); + int (*notify)(struct xfrm_state *x, const struct km_event *c); + int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp); struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir); int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); - int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c); - int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr); - int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles); + int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c); + int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr); + int (*migrate)(const struct xfrm_selector *sel, + u8 dir, u8 type, + const struct xfrm_migrate *m, + int num_bundles, + const struct xfrm_kmaddress *k); + bool (*is_alive)(const struct km_event *c); }; -extern int xfrm_register_km(struct xfrm_mgr *km); -extern int xfrm_unregister_km(struct xfrm_mgr *km); +int xfrm_register_km(struct xfrm_mgr *km); +int xfrm_unregister_km(struct xfrm_mgr *km); -extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; +struct xfrm_tunnel_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + + union { + struct ip_tunnel *ip4; + struct ip6_tnl *ip6; + } tunnel; +}; + +#define XFRM_TUNNEL_SKB_CB(__skb) ((struct 
xfrm_tunnel_skb_cb *)&((__skb)->cb[0])) /* * This structure is used for the duration where packets are being @@ -524,15 +629,18 @@ extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; * area beyond the generic IP part may be overwritten. */ struct xfrm_skb_cb { - union { - struct inet_skb_parm h4; - struct inet6_skb_parm h6; - } header; + struct xfrm_tunnel_skb_cb header; /* Sequence number for replay protection. */ union { - u64 output; - __be32 input; + struct { + __u32 low; + __u32 hi; + } output; + struct { + __be32 low; + __be32 hi; + } input; } seq; }; @@ -543,10 +651,7 @@ struct xfrm_skb_cb { * to transmit header information to the mode input/output functions. */ struct xfrm_mode_skb_cb { - union { - struct inet_skb_parm h4; - struct inet6_skb_parm h6; - } header; + struct xfrm_tunnel_skb_cb header; /* Copied from header for IPv4, always set to zero and DF for IPv6. */ __be16 id; @@ -578,10 +683,7 @@ struct xfrm_mode_skb_cb { * related information. */ struct xfrm_spi_skb_cb { - union { - struct inet_skb_parm h4; - struct inet6_skb_parm h6; - } header; + struct xfrm_tunnel_skb_cb header; unsigned int daddroff; unsigned int family; @@ -589,13 +691,6 @@ struct xfrm_spi_skb_cb { #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) -/* Audit Information */ -struct xfrm_audit -{ - u32 loginuid; - u32 secid; -}; - #ifdef CONFIG_AUDITSYSCALL static inline struct audit_buffer *xfrm_audit_start(const char *op) { @@ -611,45 +706,79 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op) return audit_buf; } -static inline void xfrm_audit_helper_usrinfo(u32 auid, u32 secid, +static inline void xfrm_audit_helper_usrinfo(bool task_valid, struct audit_buffer *audit_buf) { - char *secctx; - u32 secctx_len; - - audit_log_format(audit_buf, " auid=%u", auid); - if (secid != 0 && - security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) { - audit_log_format(audit_buf, " subj=%s", secctx); - security_release_secctx(secctx, secctx_len); - } else - audit_log_task_context(audit_buf); -} - -extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, - u32 auid, u32 secid); -extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, - u32 auid, u32 secid); -extern void xfrm_audit_state_add(struct xfrm_state *x, int result, - u32 auid, u32 secid); -extern void xfrm_audit_state_delete(struct xfrm_state *x, int result, - u32 auid, u32 secid); -extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x, - struct sk_buff *skb); -extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family); -extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, - __be32 net_spi, __be32 net_seq); -extern void xfrm_audit_state_icvfail(struct xfrm_state *x, - struct sk_buff *skb, u8 proto); + const unsigned int auid = from_kuid(&init_user_ns, task_valid ? + audit_get_loginuid(current) : + INVALID_UID); + const unsigned int ses = task_valid ? 
audit_get_sessionid(current) : + (unsigned int) -1; + + audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses); + audit_log_task_context(audit_buf); +} + +void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid); +void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, + bool task_valid); +void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid); +void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid); +void xfrm_audit_state_replay_overflow(struct xfrm_state *x, + struct sk_buff *skb); +void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb, + __be32 net_seq); +void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family); +void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi, + __be32 net_seq); +void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb, + u8 proto); #else -#define xfrm_audit_policy_add(x, r, a, s) do { ; } while (0) -#define xfrm_audit_policy_delete(x, r, a, s) do { ; } while (0) -#define xfrm_audit_state_add(x, r, a, s) do { ; } while (0) -#define xfrm_audit_state_delete(x, r, a, s) do { ; } while (0) -#define xfrm_audit_state_replay_overflow(x, s) do { ; } while (0) -#define xfrm_audit_state_notfound_simple(s, f) do { ; } while (0) -#define xfrm_audit_state_notfound(s, f, sp, sq) do { ; } while (0) -#define xfrm_audit_state_icvfail(x, s, p) do { ; } while (0) + +static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, + bool task_valid) +{ +} + +static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, + bool task_valid) +{ +} + +static inline void xfrm_audit_state_add(struct xfrm_state *x, int result, + bool task_valid) +{ +} + +static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result, + bool task_valid) +{ +} + +static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x, + struct sk_buff *skb) +{ +} + +static inline void xfrm_audit_state_replay(struct xfrm_state *x, + struct sk_buff *skb, __be32 net_seq) +{ +} + +static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb, + u16 family) +{ +} + +static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, + __be32 net_spi, __be32 net_seq) +{ +} + +static inline void xfrm_audit_state_icvfail(struct xfrm_state *x, + struct sk_buff *skb, u8 proto) +{ +} #endif /* CONFIG_AUDITSYSCALL */ static inline void xfrm_pol_hold(struct xfrm_policy *policy) @@ -658,7 +787,7 @@ static inline void xfrm_pol_hold(struct xfrm_policy *policy) atomic_inc(&policy->refcnt); } -extern void xfrm_policy_destroy(struct xfrm_policy *policy); +void xfrm_policy_destroy(struct xfrm_policy *policy); static inline void xfrm_pol_put(struct xfrm_policy *policy) { @@ -666,21 +795,14 @@ static inline void xfrm_pol_put(struct xfrm_policy *policy) xfrm_policy_destroy(policy); } -#ifdef CONFIG_XFRM_SUB_POLICY static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) { int i; for (i = npols - 1; i >= 0; --i) xfrm_pol_put(pols[i]); } -#else -static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) -{ - xfrm_pol_put(pols[0]); -} -#endif -extern void __xfrm_state_destroy(struct xfrm_state *); +void __xfrm_state_destroy(struct xfrm_state *); static inline void __xfrm_state_put(struct xfrm_state *x) { @@ -698,19 +820,20 @@ static inline void xfrm_state_hold(struct xfrm_state *x) atomic_inc(&x->refcnt); } -static __inline__ int addr_match(void *token1, void *token2, int prefixlen) +static 
inline bool addr_match(const void *token1, const void *token2, + int prefixlen) { - __be32 *a1 = token1; - __be32 *a2 = token2; + const __be32 *a1 = token1; + const __be32 *a2 = token2; int pdw; int pbi; - pdw = prefixlen >> 5; /* num of whole __u32 in prefix */ + pdw = prefixlen >> 5; /* num of whole u32 in prefix */ pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */ if (pdw) if (memcmp(a1, a2, pdw << 2)) - return 0; + return false; if (pbi) { __be32 mask; @@ -718,29 +841,40 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen) mask = htonl((0xffffffff) << (32 - pbi)); if ((a1[pdw] ^ a2[pdw]) & mask) - return 0; + return false; } - return 1; + return true; +} + +static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen) +{ + /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */ + if (prefixlen == 0) + return true; + return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen))); } static __inline__ -__be16 xfrm_flowi_sport(struct flowi *fl) +__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) { __be16 port; - switch(fl->proto) { + switch(fl->flowi_proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_UDPLITE: case IPPROTO_SCTP: - port = fl->fl_ip_sport; + port = uli->ports.sport; break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: - port = htons(fl->fl_icmp_type); + port = htons(uli->icmpt.type); break; case IPPROTO_MH: - port = htons(fl->fl_mh_type); + port = htons(uli->mht.type); + break; + case IPPROTO_GRE: + port = htons(ntohl(uli->gre_key) >> 16); break; default: port = 0; /*XXX*/ @@ -749,19 +883,22 @@ __be16 xfrm_flowi_sport(struct flowi *fl) } static __inline__ -__be16 xfrm_flowi_dport(struct flowi *fl) +__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli) { __be16 port; - switch(fl->proto) { + switch(fl->flowi_proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_UDPLITE: case IPPROTO_SCTP: - port = fl->fl_ip_dport; + port = uli->ports.dport; break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: - port = htons(fl->fl_icmp_code); + port = htons(uli->icmpt.code); + break; + case IPPROTO_GRE: + port = htons(ntohl(uli->gre_key) & 0xffff); break; default: port = 0; /*XXX*/ @@ -769,14 +906,14 @@ __be16 xfrm_flowi_dport(struct flowi *fl) return port; } -extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl, - unsigned short family); +bool xfrm_selector_match(const struct xfrm_selector *sel, + const struct flowi *fl, unsigned short family); #ifdef CONFIG_SECURITY_NETWORK_XFRM /* If neither has a context --> match * Otherwise, both must have a context and the sids, doi, alg must match */ -static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) +static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) { return ((!s1 && !s2) || (s1 && s2 && @@ -785,9 +922,9 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct (s1->ctx_alg == s2->ctx_alg))); } #else -static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) +static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) { - return 1; + return true; } #endif @@ -802,27 +939,32 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct * bundles differing by session id. All the bundles grow from a parent * policy rule. 
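The prefixlen == 0 special case in addr4_match() above exists because shifting a 32-bit value by 32 is undefined behaviour in C. The same logic, reduced to a stand-alone user-space illustration (values and names are hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Same test as addr4_match(): do a1 and a2 share their first prefixlen bits?
 * Both addresses are in network byte order. */
static int prefix4_match(uint32_t a1, uint32_t a2, unsigned int prefixlen)
{
	if (prefixlen == 0)
		return 1;	/* a /0 prefix matches everything; avoids << 32 */
	return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
}

int main(void)
{
	uint32_t a = htonl(0xC0A80101);	/* 192.168.1.1 */
	uint32_t b = htonl(0xC0A801FE);	/* 192.168.1.254 */

	printf("/24 match: %d\n", prefix4_match(a, b, 24));	/* prints 1 */
	printf("/32 match: %d\n", prefix4_match(a, b, 32));	/* prints 0 */
	return 0;
}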
*/ -struct xfrm_dst -{ +struct xfrm_dst { union { struct dst_entry dst; struct rtable rt; struct rt6_info rt6; } u; struct dst_entry *route; + struct flow_cache_object flo; + struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; + int num_pols, num_xfrms; #ifdef CONFIG_XFRM_SUB_POLICY struct flowi *origin; struct xfrm_selector *partner; #endif - u32 genid; + u32 xfrm_genid; + u32 policy_genid; u32 route_mtu_cached; u32 child_mtu_cached; u32 route_cookie; u32 path_cookie; }; +#ifdef CONFIG_XFRM static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) { + xfrm_pols_put(xdst->pols, xdst->num_pols); dst_release(xdst->route); if (likely(xdst->u.dst.xfrm)) xfrm_state_put(xdst->u.dst.xfrm); @@ -833,16 +975,25 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) xdst->partner = NULL; #endif } +#endif -extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); +void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); -struct sec_path -{ +struct sec_path { atomic_t refcnt; int len; struct xfrm_state *xvec[XFRM_MAX_DEPTH]; }; +static inline int secpath_exists(struct sk_buff *skb) +{ +#ifdef CONFIG_XFRM + return skb->sp != NULL; +#else + return 0; +#endif +} + static inline struct sec_path * secpath_get(struct sec_path *sp) { @@ -851,7 +1002,7 @@ secpath_get(struct sec_path *sp) return sp; } -extern void __secpath_destroy(struct sec_path *sp); +void __secpath_destroy(struct sec_path *sp); static inline void secpath_put(struct sec_path *sp) @@ -860,7 +1011,7 @@ secpath_put(struct sec_path *sp) __secpath_destroy(sp); } -extern struct sec_path *secpath_dup(struct sec_path *src); +struct sec_path *secpath_dup(struct sec_path *src); static inline void secpath_reset(struct sk_buff *skb) @@ -872,7 +1023,7 @@ secpath_reset(struct sk_buff *skb) } static inline int -xfrm_addr_any(xfrm_address_t *addr, unsigned short family) +xfrm_addr_any(const xfrm_address_t *addr, unsigned short family) { switch (family) { case AF_INET: @@ -884,21 +1035,21 @@ xfrm_addr_any(xfrm_address_t *addr, unsigned short family) } static inline int -__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x) +__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x) { return (tmpl->saddr.a4 && tmpl->saddr.a4 != x->props.saddr.a4); } static inline int -__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x) +__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x) { return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) && - ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr)); + !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr)); } static inline int -xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family) +xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family) { switch (family) { case AF_INET: @@ -910,19 +1061,21 @@ xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short } #ifdef CONFIG_XFRM -extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family); +int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, + unsigned short family); static inline int __xfrm_policy_check2(struct sock *sk, int dir, struct sk_buff *skb, unsigned int family, int reverse) { + struct net *net = dev_net(skb->dev); int ndir = dir | (reverse ? 
XFRM_POLICY_MASK + 1 : 0); if (sk && sk->sk_policy[XFRM_POLICY_IN]) return __xfrm_policy_check(sk, ndir, skb, family); - return (!xfrm_policy_count[dir] && !skb->sp) || - (skb->dst->flags & DST_NOPOLICY) || + return (!net->xfrm.policy_count[dir] && !skb->sp) || + (skb_dst(skb)->flags & DST_NOPOLICY) || __xfrm_policy_check(sk, ndir, skb, family); } @@ -953,8 +1106,8 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1); } -extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, - unsigned int family, int reverse); +int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, + unsigned int family, int reverse); static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned int family) @@ -969,12 +1122,14 @@ static inline int xfrm_decode_session_reverse(struct sk_buff *skb, return __xfrm_decode_session(skb, fl, family, 1); } -extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); +int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family) { - return !xfrm_policy_count[XFRM_POLICY_OUT] || - (skb->dst->flags & DST_NOXFRM) || + struct net *net = dev_net(skb->dev); + + return !net->xfrm.policy_count[XFRM_POLICY_OUT] || + (skb_dst(skb)->flags & DST_NOXFRM) || __xfrm_route_forward(skb, family); } @@ -988,7 +1143,7 @@ static inline int xfrm6_route_forward(struct sk_buff *skb) return xfrm_route_forward(skb, AF_INET6); } -extern int __xfrm_sk_clone_policy(struct sock *sk); +int __xfrm_sk_clone_policy(struct sock *sk); static inline int xfrm_sk_clone_policy(struct sock *sk) { @@ -997,7 +1152,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk) return 0; } -extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir); +int xfrm_policy_delete(struct xfrm_policy *pol, int dir); static inline void xfrm_sk_free_policy(struct sock *sk) { @@ -1011,6 +1166,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk) } } +void xfrm_garbage_collect(struct net *net); + #else static inline void xfrm_sk_free_policy(struct sock *sk) {} @@ -1045,35 +1202,55 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, { return 1; } +static inline void xfrm_garbage_collect(struct net *net) +{ +} #endif static __inline__ -xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family) +xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family) { switch (family){ case AF_INET: - return (xfrm_address_t *)&fl->fl4_dst; + return (xfrm_address_t *)&fl->u.ip4.daddr; case AF_INET6: - return (xfrm_address_t *)&fl->fl6_dst; + return (xfrm_address_t *)&fl->u.ip6.daddr; } return NULL; } static __inline__ -xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family) +xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family) { switch (family){ case AF_INET: - return (xfrm_address_t *)&fl->fl4_src; + return (xfrm_address_t *)&fl->u.ip4.saddr; case AF_INET6: - return (xfrm_address_t *)&fl->fl6_src; + return (xfrm_address_t *)&fl->u.ip6.saddr; } return NULL; } +static __inline__ +void xfrm_flowi_addr_get(const struct flowi *fl, + xfrm_address_t *saddr, xfrm_address_t *daddr, + unsigned short family) +{ + switch(family) { + case AF_INET: + memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4)); + memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); + break; + case AF_INET6: + *(struct in6_addr *)saddr->a6 = 
fl->u.ip6.saddr; + *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr; + break; + } +} + static __inline__ int -__xfrm4_state_addr_check(struct xfrm_state *x, - xfrm_address_t *daddr, xfrm_address_t *saddr) +__xfrm4_state_addr_check(const struct xfrm_state *x, + const xfrm_address_t *daddr, const xfrm_address_t *saddr) { if (daddr->a4 == x->id.daddr.a4 && (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4)) @@ -1082,11 +1259,11 @@ __xfrm4_state_addr_check(struct xfrm_state *x, } static __inline__ int -__xfrm6_state_addr_check(struct xfrm_state *x, - xfrm_address_t *daddr, xfrm_address_t *saddr) +__xfrm6_state_addr_check(const struct xfrm_state *x, + const xfrm_address_t *daddr, const xfrm_address_t *saddr) { - if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) && - (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)|| + if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) && + (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) || ipv6_addr_any((struct in6_addr *)saddr) || ipv6_addr_any((struct in6_addr *)&x->props.saddr))) return 1; @@ -1094,8 +1271,8 @@ __xfrm6_state_addr_check(struct xfrm_state *x, } static __inline__ int -xfrm_state_addr_check(struct xfrm_state *x, - xfrm_address_t *daddr, xfrm_address_t *saddr, +xfrm_state_addr_check(const struct xfrm_state *x, + const xfrm_address_t *daddr, const xfrm_address_t *saddr, unsigned short family) { switch (family) { @@ -1108,23 +1285,23 @@ xfrm_state_addr_check(struct xfrm_state *x, } static __inline__ int -xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl, +xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl, unsigned short family) { switch (family) { case AF_INET: return __xfrm4_state_addr_check(x, - (xfrm_address_t *)&fl->fl4_dst, - (xfrm_address_t *)&fl->fl4_src); + (const xfrm_address_t *)&fl->u.ip4.daddr, + (const xfrm_address_t *)&fl->u.ip4.saddr); case AF_INET6: return __xfrm6_state_addr_check(x, - (xfrm_address_t *)&fl->fl6_dst, - (xfrm_address_t *)&fl->fl6_src); + (const xfrm_address_t *)&fl->u.ip6.daddr, + (const xfrm_address_t *)&fl->u.ip6.saddr); } return 0; } -static inline int xfrm_state_kern(struct xfrm_state *x) +static inline int xfrm_state_kern(const struct xfrm_state *x) { return atomic_read(&x->tunnel_users); } @@ -1162,6 +1339,7 @@ struct xfrm_algo_desc { char *name; char *compat; u8 available:1; + u8 pfkey_supported:1; union { struct xfrm_algo_aead_info aead; struct xfrm_algo_auth_info auth; @@ -1171,32 +1349,58 @@ struct xfrm_algo_desc { struct sadb_alg desc; }; +/* XFRM protocol handlers. */ +struct xfrm4_protocol { + int (*handler)(struct sk_buff *skb); + int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type); + int (*cb_handler)(struct sk_buff *skb, int err); + int (*err_handler)(struct sk_buff *skb, u32 info); + + struct xfrm4_protocol __rcu *next; + int priority; +}; + +struct xfrm6_protocol { + int (*handler)(struct sk_buff *skb); + int (*cb_handler)(struct sk_buff *skb, int err); + int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info); + + struct xfrm6_protocol __rcu *next; + int priority; +}; + /* XFRM tunnel handlers. 
*/ struct xfrm_tunnel { int (*handler)(struct sk_buff *skb); - int (*err_handler)(struct sk_buff *skb, __u32 info); + int (*err_handler)(struct sk_buff *skb, u32 info); - struct xfrm_tunnel *next; + struct xfrm_tunnel __rcu *next; int priority; }; struct xfrm6_tunnel { int (*handler)(struct sk_buff *skb); int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, - int type, int code, int offset, __be32 info); - struct xfrm6_tunnel *next; + u8 type, u8 code, int offset, __be32 info); + struct xfrm6_tunnel __rcu *next; int priority; }; -extern void xfrm_init(void); -extern void xfrm4_init(void); -extern void xfrm_state_init(void); -extern void xfrm4_state_init(void); +void xfrm_init(void); +void xfrm4_init(void); +int xfrm_state_init(struct net *net); +void xfrm_state_fini(struct net *net); +void xfrm4_state_init(void); +void xfrm4_protocol_init(void); #ifdef CONFIG_XFRM -extern int xfrm6_init(void); -extern void xfrm6_fini(void); -extern int xfrm6_state_init(void); -extern void xfrm6_state_fini(void); +int xfrm6_init(void); +void xfrm6_fini(void); +int xfrm6_state_init(void); +void xfrm6_state_fini(void); +int xfrm6_protocol_init(void); +void xfrm6_protocol_fini(void); #else static inline int xfrm6_init(void) { @@ -1209,33 +1413,58 @@ static inline void xfrm6_fini(void) #endif #ifdef CONFIG_XFRM_STATISTICS -extern int xfrm_proc_init(void); +int xfrm_proc_init(struct net *net); +void xfrm_proc_fini(struct net *net); +#endif + +int xfrm_sysctl_init(struct net *net); +#ifdef CONFIG_SYSCTL +void xfrm_sysctl_fini(struct net *net); +#else +static inline void xfrm_sysctl_fini(struct net *net) +{ +} #endif -extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *); -extern struct xfrm_state *xfrm_state_alloc(void); -extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, - struct flowi *fl, struct xfrm_tmpl *tmpl, - struct xfrm_policy *pol, int *err, - unsigned short family); -extern struct xfrm_state * xfrm_stateonly_find(xfrm_address_t *daddr, - xfrm_address_t *saddr, - unsigned short family, - u8 mode, u8 proto, u32 reqid); -extern int xfrm_state_check_expire(struct xfrm_state *x); -extern void xfrm_state_insert(struct xfrm_state *x); -extern int xfrm_state_add(struct xfrm_state *x); -extern int xfrm_state_update(struct xfrm_state *x); -extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family); -extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family); +void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto, + struct xfrm_address_filter *filter); +int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, + int (*func)(struct xfrm_state *, int, void*), void *); +void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net); +struct xfrm_state *xfrm_state_alloc(struct net *net); +struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr, + const xfrm_address_t *saddr, + const struct flowi *fl, + struct xfrm_tmpl *tmpl, + struct xfrm_policy *pol, int *err, + unsigned short family); +struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, + xfrm_address_t *daddr, + xfrm_address_t *saddr, + unsigned short family, + u8 mode, u8 proto, u32 reqid); +struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, + unsigned short family); +int xfrm_state_check_expire(struct xfrm_state *x); +void xfrm_state_insert(struct xfrm_state *x); +int 
xfrm_state_add(struct xfrm_state *x); +int xfrm_state_update(struct xfrm_state *x); +struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark, + const xfrm_address_t *daddr, __be32 spi, + u8 proto, unsigned short family); +struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark, + const xfrm_address_t *daddr, + const xfrm_address_t *saddr, + u8 proto, + unsigned short family); #ifdef CONFIG_XFRM_SUB_POLICY -extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, - int n, unsigned short family); -extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, - int n, unsigned short family); +int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, + unsigned short family, struct net *net); +int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, + unsigned short family); #else static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, - int n, unsigned short family) + int n, unsigned short family, struct net *net) { return -ENOSYS; } @@ -1264,62 +1493,74 @@ struct xfrmk_spdinfo { u32 spdhmcnt; }; -extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq); -extern int xfrm_state_delete(struct xfrm_state *x); -extern int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info); -extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si); -extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si); -extern int xfrm_replay_check(struct xfrm_state *x, - struct sk_buff *skb, __be32 seq); -extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); -extern void xfrm_replay_notify(struct xfrm_state *x, int event); -extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); -extern int xfrm_init_state(struct xfrm_state *x); -extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, - int encap_type); -extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr); -extern int xfrm_output_resume(struct sk_buff *skb, int err); -extern int xfrm_output(struct sk_buff *skb); -extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm4_extract_header(struct sk_buff *skb); -extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, - int encap_type); -extern int xfrm4_transport_finish(struct sk_buff *skb, int async); -extern int xfrm4_rcv(struct sk_buff *skb); +struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); +int xfrm_state_delete(struct xfrm_state *x); +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); +void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); +void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); +u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); +int xfrm_init_replay(struct xfrm_state *x); +int xfrm_state_mtu(struct xfrm_state *x, int mtu); +int __xfrm_init_state(struct xfrm_state *x, bool init_replay); +int xfrm_init_state(struct xfrm_state *x); +int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); +int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); +int xfrm_input_resume(struct sk_buff *skb, int nexthdr); +int xfrm_output_resume(struct sk_buff *skb, int err); +int xfrm_output(struct sk_buff *skb); +int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); +void xfrm_local_error(struct sk_buff *skb, int mtu); +int 
xfrm4_extract_header(struct sk_buff *skb); +int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); +int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type); +int xfrm4_transport_finish(struct sk_buff *skb, int async); +int xfrm4_rcv(struct sk_buff *skb); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) { - return xfrm4_rcv_encap(skb, nexthdr, spi, 0); -} - -extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm4_output(struct sk_buff *skb); -extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); -extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); -extern int xfrm6_extract_header(struct sk_buff *skb); -extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); -extern int xfrm6_transport_finish(struct sk_buff *skb, int async); -extern int xfrm6_rcv(struct sk_buff *skb); -extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, - xfrm_address_t *saddr, u8 proto); -extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); -extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); -extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr); -extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr); -extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr); -extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); -extern int xfrm6_output(struct sk_buff *skb); -extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, - u8 **prevhdr); + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + return xfrm_input(skb, nexthdr, spi, 0); +} + +int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); +int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); +int xfrm4_output(struct sock *sk, struct sk_buff *skb); +int xfrm4_output_finish(struct sk_buff *skb); +int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); +int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); +int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); +int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); +int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); +void xfrm4_local_error(struct sk_buff *skb, u32 mtu); +int xfrm6_extract_header(struct sk_buff *skb); +int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); +int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); +int xfrm6_transport_finish(struct sk_buff *skb, int async); +int xfrm6_rcv(struct sk_buff *skb); +int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, + xfrm_address_t *saddr, u8 proto); +void xfrm6_local_error(struct sk_buff *skb, u32 mtu); +int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err); +int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol); +int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol); +int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); +int 
xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); +__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); +__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); +int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); +int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); +int xfrm6_output(struct sock *sk, struct sk_buff *skb); +int xfrm6_output_finish(struct sk_buff *skb); +int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, + u8 **prevhdr); #ifdef CONFIG_XFRM -extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); -extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen); +int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); +int xfrm_user_policy(struct sock *sk, int optname, + u8 __user *optval, int optlen); #else static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) { @@ -1334,74 +1575,83 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) } #endif -struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); -extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *); +struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp); + +void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type); +int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, + int (*func)(struct xfrm_policy *, int, int, void*), + void *); +void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, + u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err); -int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info); +struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, + u32 id, int delete, int *err); +int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); u32 xfrm_get_acqseq(void); -extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); -struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto, - xfrm_address_t *daddr, xfrm_address_t *saddr, - int create, unsigned short family); -extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); -extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst, - struct flowi *fl, int family, int strict); +int verify_spi_info(u8 proto, u32 min, u32 max); +int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); +struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, + u8 mode, u32 reqid, u8 proto, + const xfrm_address_t *daddr, + const xfrm_address_t *saddr, int create, + unsigned short family); +int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); #ifdef CONFIG_XFRM_MIGRATE -extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type, - struct xfrm_migrate *m, int num_bundles); -extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m); -extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x, - struct xfrm_migrate *m); -extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type, - struct xfrm_migrate *m, int num_bundles); +int km_migrate(const struct 
xfrm_selector *sel, u8 dir, u8 type, + const struct xfrm_migrate *m, int num_bundles, + const struct xfrm_kmaddress *k); +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net); +struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, + struct xfrm_migrate *m); +int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + struct xfrm_migrate *m, int num_bundles, + struct xfrm_kmaddress *k, struct net *net); #endif -extern wait_queue_head_t km_waitq; -extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); -extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid); -extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr); - -extern void xfrm_input_init(void); -extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); - -extern void xfrm_probe_algs(void); -extern int xfrm_count_auth_supported(void); -extern int xfrm_count_enc_supported(void); -extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx); -extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx); -extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id); -extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id); -extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id); -extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe); -extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe); -extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe); -extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len, - int probe); - -struct hash_desc; -struct scatterlist; -typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *, - unsigned int); - -extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm, - int offset, int len, icv_update_fn_t icv_update); - -static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, - int family) +int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); +void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid); +int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, + xfrm_address_t *addr); + +void xfrm_input_init(void); +int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); + +void xfrm_probe_algs(void); +int xfrm_count_pfkey_auth_supported(void); +int xfrm_count_pfkey_enc_supported(void); +struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx); +struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx); +struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id); +struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id); +struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id); +struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe); +struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe); +struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe); +struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, + int probe); + +static inline bool xfrm6_addr_equal(const xfrm_address_t *a, + const xfrm_address_t *b) +{ + return ipv6_addr_equal((const struct in6_addr *)a, + (const struct in6_addr *)b); +} + +static inline bool xfrm_addr_equal(const xfrm_address_t *a, + const xfrm_address_t *b, + sa_family_t family) { switch (family) { default: case AF_INET: - return (__force __u32)a->a4 - (__force __u32)b->a4; + return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0; case AF_INET6: 
- return ipv6_addr_cmp((struct in6_addr *)a, - (struct in6_addr *)b); + return xfrm6_addr_equal(a, b); } } @@ -1410,30 +1660,94 @@ static inline int xfrm_policy_id2dir(u32 index) return index & 7; } -static inline int xfrm_aevent_is_on(void) +#ifdef CONFIG_XFRM +static inline int xfrm_aevent_is_on(struct net *net) { struct sock *nlsk; int ret = 0; rcu_read_lock(); - nlsk = rcu_dereference(xfrm_nl); + nlsk = rcu_dereference(net->xfrm.nlsk); if (nlsk) ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS); rcu_read_unlock(); return ret; } -static inline int xfrm_alg_len(struct xfrm_algo *alg) +static inline int xfrm_acquire_is_on(struct net *net) +{ + struct sock *nlsk; + int ret = 0; + + rcu_read_lock(); + nlsk = rcu_dereference(net->xfrm.nlsk); + if (nlsk) + ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE); + rcu_read_unlock(); + + return ret; +} +#endif + +static inline int aead_len(struct xfrm_algo_aead *alg) +{ + return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); +} + +static inline int xfrm_alg_len(const struct xfrm_algo *alg) +{ + return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); +} + +static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg) { return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); } +static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn) +{ + return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32); +} + #ifdef CONFIG_XFRM_MIGRATE +static inline int xfrm_replay_clone(struct xfrm_state *x, + struct xfrm_state *orig) +{ + x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), + GFP_KERNEL); + if (!x->replay_esn) + return -ENOMEM; + + x->replay_esn->bmp_len = orig->replay_esn->bmp_len; + x->replay_esn->replay_window = orig->replay_esn->replay_window; + + x->preplay_esn = kmemdup(x->replay_esn, + xfrm_replay_state_esn_len(x->replay_esn), + GFP_KERNEL); + if (!x->preplay_esn) { + kfree(x->replay_esn); + return -ENOMEM; + } + + return 0; +} + +static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig) +{ + return kmemdup(orig, aead_len(orig), GFP_KERNEL); +} + + static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) { return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); } +static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig) +{ + return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL); +} + static inline void xfrm_states_put(struct xfrm_state **states, int n) { int i; @@ -1449,9 +1763,50 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n) } #endif +#ifdef CONFIG_XFRM static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { return skb->sp->xvec[skb->sp->len - 1]; } +#endif + +static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) +{ + if (attrs[XFRMA_MARK]) + memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark)); + else + m->v = m->m = 0; + + return m->v & m->m; +} +static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) +{ + int ret = 0; + + if (m->m | m->v) + ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m); + return ret; +} + +static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, + unsigned int family) +{ + bool tunnel = false; + + switch(family) { + case AF_INET: + if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) + tunnel = true; + break; + case AF_INET6: + if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6) + tunnel = true; + break; + } + if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)) + return 
-EINVAL;
+
+	return 0;
+}
 #endif /* _NET_XFRM_H */
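
The flow-address helpers in the hunks above (xfrm_flowi_daddr(), xfrm_flowi_saddr() and the new xfrm_flowi_addr_get()) now read endpoints out of the flowi union, fl->u.ip4 or fl->u.ip6, selected by family. Below is a rough userspace sketch of that per-family copy; the struct layouts and names are simplified stand-ins, not the kernel's flowi or xfrm_address_t definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Simplified stand-ins for the kernel types; layout is illustrative only. */
typedef union {
	uint32_t a4;
	uint32_t a6[4];
} addr_t;

struct flow {
	unsigned short family;
	union {
		struct { struct in_addr  saddr, daddr; } ip4;
		struct { struct in6_addr saddr, daddr; } ip6;
	} u;
};

/* Same idea as xfrm_flowi_addr_get(): copy both endpoints by family. */
static void flow_addr_get(const struct flow *fl, addr_t *saddr, addr_t *daddr)
{
	switch (fl->family) {
	case AF_INET:
		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
		break;
	case AF_INET6:
		memcpy(saddr->a6, &fl->u.ip6.saddr, sizeof(saddr->a6));
		memcpy(daddr->a6, &fl->u.ip6.daddr, sizeof(daddr->a6));
		break;
	}
}

int main(void)
{
	struct flow fl = { .family = AF_INET };
	addr_t s, d;
	char buf[INET_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &fl.u.ip4.saddr);
	inet_pton(AF_INET, "198.51.100.2", &fl.u.ip4.daddr);
	flow_addr_get(&fl, &s, &d);
	printf("dst %s\n", inet_ntop(AF_INET, &d.a4, buf, sizeof(buf)));
	return 0;
}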
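
The xfrm4_protocol and xfrm6_protocol structures introduced above chain per-protocol handlers behind an __rcu next pointer, ordered by priority. The following is a rough userspace model of that registration pattern, with plain pointers instead of RCU, an arbitrarily chosen ordering convention, and hypothetical names throughout.

#include <stdio.h>

/* Userspace model of a priority-ordered handler chain; no RCU here. */
struct proto_handler {
	int (*handler)(const char *pkt);
	struct proto_handler *next;
	int priority;
};

static struct proto_handler *chain;

/* Insert keeping the list sorted by priority (ordering chosen arbitrarily
 * for this sketch). */
static void proto_register(struct proto_handler *p)
{
	struct proto_handler **pp = &chain;

	while (*pp && (*pp)->priority <= p->priority)
		pp = &(*pp)->next;
	p->next = *pp;
	*pp = p;
}

/* Walk the chain until one handler claims the packet (returns 0). */
static int proto_input(const char *pkt)
{
	struct proto_handler *p;

	for (p = chain; p; p = p->next)
		if (p->handler(pkt) == 0)
			return 0;
	return -1;
}

static int esp_handler(const char *pkt)
{
	printf("esp handled: %s\n", pkt);
	return 0;
}

int main(void)
{
	struct proto_handler esp = { .handler = esp_handler, .priority = 10 };

	proto_register(&esp);
	return proto_input("dummy packet");
}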
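
xfrm_state_walk_init()/xfrm_state_walk()/xfrm_state_walk_done() above replace the old one-shot walker with a resumable cursor plus callback. Here is a compact userspace model of that pattern over an array of dummy entries; all names and fields are illustrative, and the real walker additionally carries per-netns locking and an address filter.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins: a table of "states" and a resumable walk cursor. */
struct state { unsigned char proto; unsigned int spi; };

struct state_walk {
	unsigned char proto;	/* filter, 0 means all protocols */
	size_t next;		/* resume position */
};

static struct state table[] = {
	{ 50, 0x100 }, { 51, 0x200 }, { 50, 0x300 },
};

static void walk_init(struct state_walk *w, unsigned char proto)
{
	w->proto = proto;
	w->next = 0;
}

/* Call func() for each matching entry; a non-zero return stops the walk and
 * the cursor lets a later call resume where it left off. */
static int walk(struct state_walk *w,
		int (*func)(struct state *, void *), void *arg)
{
	for (; w->next < sizeof(table) / sizeof(table[0]); w->next++) {
		struct state *x = &table[w->next];
		int err;

		if (w->proto && x->proto != w->proto)
			continue;
		err = func(x, arg);
		if (err)
			return err;
	}
	return 0;
}

static int dump_one(struct state *x, void *arg)
{
	(void)arg;
	printf("proto %u spi 0x%x\n", x->proto, x->spi);
	return 0;
}

int main(void)
{
	struct state_walk w;

	walk_init(&w, 50);		/* only the ESP-like entries */
	return walk(&w, dump_one, NULL);
}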
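
The rewritten xfrm4_rcv_spi() inline above records the destination-address offset, offsetof(struct iphdr, daddr), in the SPI control block before handing the packet to the family-independent xfrm_input(). The sketch below shows the underlying trick, storing a field offset so later code can fetch the address without knowing the header type; the structure and function names are stand-ins, not the kernel's skb control blocks.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Minimal stand-in for an IPv4 header; only the fields needed here. */
struct ip4hdr {
	uint8_t  ver_ihl, tos;
	uint16_t tot_len, id, frag_off;
	uint8_t  ttl, protocol;
	uint16_t check;
	uint32_t saddr, daddr;
};

/* Family-independent consumer: it only knows "the destination address lives
 * daddroff bytes into the header", as recorded by the receive path. */
static uint32_t peek_daddr(const void *hdr, size_t daddroff)
{
	uint32_t d;

	memcpy(&d, (const uint8_t *)hdr + daddroff, sizeof(d));
	return d;
}

int main(void)
{
	struct ip4hdr h = { 0 };
	char buf[INET_ADDRSTRLEN];

	inet_pton(AF_INET, "203.0.113.9", &h.daddr);
	uint32_t d = peek_daddr(&h, offsetof(struct ip4hdr, daddr));
	printf("daddr %s\n", inet_ntop(AF_INET, &d, buf, sizeof(buf)));
	return 0;
}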
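
xfrm_addr_equal() and xfrm6_addr_equal() above replace the old cmp-style xfrm_addr_cmp() with boolean equality, an XOR test for IPv4 and ipv6_addr_equal() for IPv6. A self-contained userspace version of the same comparison follows, using a stand-in address union rather than the kernel's xfrm_address_t.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Stand-in for xfrm_address_t: 4 bytes for IPv4, 16 for IPv6. */
typedef union {
	uint32_t        a4;
	struct in6_addr a6;
} addr_t;

/* Same shape as the header's xfrm_addr_equal(): boolean equality instead of
 * the old cmp-style difference value. */
static bool addr_equal(const addr_t *a, const addr_t *b, int family)
{
	switch (family) {
	default:
	case AF_INET:
		return ((a->a4 ^ b->a4) == 0);
	case AF_INET6:
		return memcmp(&a->a6, &b->a6, sizeof(a->a6)) == 0;
	}
}

int main(void)
{
	addr_t x, y;

	inet_pton(AF_INET6, "2001:db8::1", &x.a6);
	inet_pton(AF_INET6, "2001:db8::1", &y.a6);
	printf("v6 equal: %d\n", addr_equal(&x, &y, AF_INET6));
	return 0;
}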
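
aead_len(), xfrm_alg_len(), xfrm_alg_auth_len() and xfrm_replay_state_esn_len() above size variable-length blobs: the fixed struct plus a key rounded up from bits to whole bytes, or plus an anti-replay bitmap of bmp_len 32-bit words. A small runnable illustration of the same arithmetic, with trimmed-down stand-in structs rather than the kernel definitions:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed-down stand-ins: an algorithm blob whose key (in bits) trails the
 * struct, and an ESN replay state whose bitmap of bmp_len words trails it. */
struct algo {
	char     name[64];
	unsigned alg_key_len;	/* in bits */
	char     alg_key[];
};

struct replay_esn {
	unsigned bmp_len;	/* bitmap length in 32-bit words */
	uint32_t seq, oseq;
	uint32_t bmp[];
};

static size_t algo_len(const struct algo *a)
{
	/* round the key up to whole bytes, as the header's helpers do */
	return sizeof(*a) + (a->alg_key_len + 7) / 8;
}

static size_t replay_esn_len(const struct replay_esn *r)
{
	return sizeof(*r) + r->bmp_len * sizeof(uint32_t);
}

int main(void)
{
	struct algo a = { .name = "hmac(sha256)", .alg_key_len = 256 };
	struct replay_esn r = { .bmp_len = 4 };	/* 128-packet window */

	printf("algo blob: %zu bytes, esn blob: %zu bytes\n",
	       algo_len(&a), replay_esn_len(&r));
	return 0;
}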
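
The new xfrm_mark_get()/xfrm_mark_put() inlines above treat a mark as a (value, mask) pair: the effective mark is v & m, and the netlink attribute is only emitted when either field is non-zero. A minimal userspace sketch of that logic, with the nla_put() call replaced by a printf and all names hypothetical:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for struct xfrm_mark: a value plus the mask of bits that count. */
struct mark { uint32_t v, m; };

/* Mirrors the header's xfrm_mark_get(): the effective mark is v & m. */
static uint32_t mark_get(const struct mark *m)
{
	return m->v & m->m;
}

/* Mirrors xfrm_mark_put(): only emit the attribute when something is set. */
static int mark_put(const struct mark *m)
{
	if (m->m | m->v) {
		printf("would emit XFRMA_MARK: v=0x%x m=0x%x\n", m->v, m->m);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct mark m = { .v = 0x10, .m = 0xff };

	printf("effective mark 0x%x, emitted=%d\n", mark_get(&m), mark_put(&m));
	return 0;
}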
