Diffstat (limited to 'net/bluetooth')
29 files changed, 9830 insertions, 2725 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c new file mode 100644 index 00000000000..8796ffa08b4 --- /dev/null +++ b/net/bluetooth/6lowpan.c @@ -0,0 +1,865 @@ +/* +   Copyright (c) 2013 Intel Corp. + +   This program is free software; you can redistribute it and/or modify +   it under the terms of the GNU General Public License version 2 and +   only version 2 as published by the Free Software Foundation. + +   This program is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +   GNU General Public License for more details. +*/ + +#include <linux/if_arp.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/addrconf.h> + +#include <net/af_ieee802154.h> /* to get the address type */ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "6lowpan.h" + +#include <net/6lowpan.h> /* for the compression support */ + +#define IFACE_NAME_TEMPLATE "bt%d" +#define EUI64_ADDR_LEN 8 + +struct skb_cb { +	struct in6_addr addr; +	struct l2cap_conn *conn; +}; +#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb)) + +/* The devices list contains those devices that we are acting + * as a proxy. The BT 6LoWPAN device is a virtual device that + * connects to the Bluetooth LE device. The real connection to + * BT device is done via l2cap layer. There exists one + * virtual device / one BT 6LoWPAN network (=hciX device). + * The list contains struct lowpan_dev elements. + */ +static LIST_HEAD(bt_6lowpan_devices); +static DEFINE_RWLOCK(devices_lock); + +struct lowpan_peer { +	struct list_head list; +	struct l2cap_conn *conn; + +	/* peer addresses in various formats */ +	unsigned char eui64_addr[EUI64_ADDR_LEN]; +	struct in6_addr peer_addr; +}; + +struct lowpan_dev { +	struct list_head list; + +	struct hci_dev *hdev; +	struct net_device *netdev; +	struct list_head peers; +	atomic_t peer_count; /* number of items in peers list */ + +	struct work_struct delete_netdev; +	struct delayed_work notify_peers; +}; + +static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) +{ +	return netdev_priv(netdev); +} + +static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) +{ +	list_add(&peer->list, &dev->peers); +	atomic_inc(&dev->peer_count); +} + +static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) +{ +	list_del(&peer->list); + +	if (atomic_dec_and_test(&dev->peer_count)) { +		BT_DBG("last peer"); +		return true; +	} + +	return false; +} + +static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, +						 bdaddr_t *ba, __u8 type) +{ +	struct lowpan_peer *peer, *tmp; + +	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count), +	       ba, type); + +	list_for_each_entry_safe(peer, tmp, &dev->peers, list) { +		BT_DBG("addr %pMR type %d", +		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type); + +		if (bacmp(&peer->conn->hcon->dst, ba)) +			continue; + +		if (type == peer->conn->hcon->dst_type) +			return peer; +	} + +	return NULL; +} + +static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev, +						   struct l2cap_conn *conn) +{ +	struct lowpan_peer *peer, *tmp; + +	list_for_each_entry_safe(peer, tmp, &dev->peers, list) { +		if (peer->conn == conn) +			return peer; +	} + +	return NULL; +} + +static struct lowpan_peer 
*lookup_peer(struct l2cap_conn *conn) +{ +	struct lowpan_dev *entry, *tmp; +	struct lowpan_peer *peer = NULL; +	unsigned long flags; + +	read_lock_irqsave(&devices_lock, flags); + +	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { +		peer = peer_lookup_conn(entry, conn); +		if (peer) +			break; +	} + +	read_unlock_irqrestore(&devices_lock, flags); + +	return peer; +} + +static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) +{ +	struct lowpan_dev *entry, *tmp; +	struct lowpan_dev *dev = NULL; +	unsigned long flags; + +	read_lock_irqsave(&devices_lock, flags); + +	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { +		if (conn->hcon->hdev == entry->hdev) { +			dev = entry; +			break; +		} +	} + +	read_unlock_irqrestore(&devices_lock, flags); + +	return dev; +} + +static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) +{ +	struct sk_buff *skb_cp; +	int ret; + +	skb_cp = skb_copy(skb, GFP_ATOMIC); +	if (!skb_cp) +		return -ENOMEM; + +	ret = netif_rx(skb_cp); + +	BT_DBG("receive skb %d", ret); +	if (ret < 0) +		return NET_RX_DROP; + +	return ret; +} + +static int process_data(struct sk_buff *skb, struct net_device *netdev, +			struct l2cap_conn *conn) +{ +	const u8 *saddr, *daddr; +	u8 iphc0, iphc1; +	struct lowpan_dev *dev; +	struct lowpan_peer *peer; +	unsigned long flags; + +	dev = lowpan_dev(netdev); + +	read_lock_irqsave(&devices_lock, flags); +	peer = peer_lookup_conn(dev, conn); +	read_unlock_irqrestore(&devices_lock, flags); +	if (!peer) +		goto drop; + +	saddr = peer->eui64_addr; +	daddr = dev->netdev->dev_addr; + +	/* at least two bytes will be used for the encoding */ +	if (skb->len < 2) +		goto drop; + +	if (lowpan_fetch_skb_u8(skb, &iphc0)) +		goto drop; + +	if (lowpan_fetch_skb_u8(skb, &iphc1)) +		goto drop; + +	return lowpan_process_data(skb, netdev, +				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, +				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, +				   iphc0, iphc1, give_skb_to_upper); + +drop: +	kfree_skb(skb); +	return -EINVAL; +} + +static int recv_pkt(struct sk_buff *skb, struct net_device *dev, +		    struct l2cap_conn *conn) +{ +	struct sk_buff *local_skb; +	int ret; + +	if (!netif_running(dev)) +		goto drop; + +	if (dev->type != ARPHRD_6LOWPAN) +		goto drop; + +	/* check that it's our buffer */ +	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { +		/* Copy the packet so that the IPv6 header is +		 * properly aligned. 
+		 */ +		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1, +					    skb_tailroom(skb), GFP_ATOMIC); +		if (!local_skb) +			goto drop; + +		local_skb->protocol = htons(ETH_P_IPV6); +		local_skb->pkt_type = PACKET_HOST; + +		skb_reset_network_header(local_skb); +		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); + +		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { +			kfree_skb(local_skb); +			goto drop; +		} + +		dev->stats.rx_bytes += skb->len; +		dev->stats.rx_packets++; + +		kfree_skb(local_skb); +		kfree_skb(skb); +	} else { +		switch (skb->data[0] & 0xe0) { +		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */ +			local_skb = skb_clone(skb, GFP_ATOMIC); +			if (!local_skb) +				goto drop; + +			ret = process_data(local_skb, dev, conn); +			if (ret != NET_RX_SUCCESS) +				goto drop; + +			dev->stats.rx_bytes += skb->len; +			dev->stats.rx_packets++; + +			kfree_skb(skb); +			break; +		default: +			break; +		} +	} + +	return NET_RX_SUCCESS; + +drop: +	kfree_skb(skb); +	return NET_RX_DROP; +} + +/* Packet from BT LE device */ +int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb) +{ +	struct lowpan_dev *dev; +	struct lowpan_peer *peer; +	int err; + +	peer = lookup_peer(conn); +	if (!peer) +		return -ENOENT; + +	dev = lookup_dev(conn); +	if (!dev || !dev->netdev) +		return -ENOENT; + +	err = recv_pkt(skb, dev->netdev, conn); +	BT_DBG("recv pkt %d", err); + +	return err; +} + +static inline int skbuff_copy(void *msg, int len, int count, int mtu, +			      struct sk_buff *skb, struct net_device *dev) +{ +	struct sk_buff **frag; +	int sent = 0; + +	memcpy(skb_put(skb, count), msg, count); + +	sent += count; +	msg  += count; +	len  -= count; + +	dev->stats.tx_bytes += count; +	dev->stats.tx_packets++; + +	raw_dump_table(__func__, "Sending", skb->data, skb->len); + +	/* Continuation fragments (no L2CAP header) */ +	frag = &skb_shinfo(skb)->frag_list; +	while (len > 0) { +		struct sk_buff *tmp; + +		count = min_t(unsigned int, mtu, len); + +		tmp = bt_skb_alloc(count, GFP_ATOMIC); +		if (!tmp) +			return -ENOMEM; + +		*frag = tmp; + +		memcpy(skb_put(*frag, count), msg, count); + +		raw_dump_table(__func__, "Sending fragment", +			       (*frag)->data, count); + +		(*frag)->priority = skb->priority; + +		sent += count; +		msg  += count; +		len  -= count; + +		skb->len += (*frag)->len; +		skb->data_len += (*frag)->len; + +		frag = &(*frag)->next; + +		dev->stats.tx_bytes += count; +		dev->stats.tx_packets++; +	} + +	return sent; +} + +static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg, +				  size_t len, u32 priority, +				  struct net_device *dev) +{ +	struct sk_buff *skb; +	int err, count; +	struct l2cap_hdr *lh; + +	/* FIXME: This mtu check should be not needed and atm is only used for +	 * testing purposes +	 */ +	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE)) +		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE; + +	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); + +	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count); + +	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC); +	if (!skb) +		return ERR_PTR(-ENOMEM); + +	skb->priority = priority; + +	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE); +	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN); +	lh->len = cpu_to_le16(len); + +	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev); +	if (unlikely(err < 0)) { +		kfree_skb(skb); +		BT_DBG("skbuff copy %d failed", err); +		return ERR_PTR(err); +	} + +	return skb; +} + +static int conn_send(struct 
l2cap_conn *conn, +		     void *msg, size_t len, u32 priority, +		     struct net_device *dev) +{ +	struct sk_buff *skb; + +	skb = create_pdu(conn, msg, len, priority, dev); +	if (IS_ERR(skb)) +		return -EINVAL; + +	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len, +	       skb->priority); + +	hci_send_acl(conn->hchan, skb, ACL_START); + +	return 0; +} + +static u8 get_addr_type_from_eui64(u8 byte) +{ +	/* Is universal(0) or local(1) bit,  */ +	if (byte & 0x02) +		return ADDR_LE_DEV_RANDOM; + +	return ADDR_LE_DEV_PUBLIC; +} + +static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr) +{ +	u8 *eui64 = ip6_daddr->s6_addr + 8; + +	addr->b[0] = eui64[7]; +	addr->b[1] = eui64[6]; +	addr->b[2] = eui64[5]; +	addr->b[3] = eui64[2]; +	addr->b[4] = eui64[1]; +	addr->b[5] = eui64[0]; +} + +static void convert_dest_bdaddr(struct in6_addr *ip6_daddr, +				bdaddr_t *addr, u8 *addr_type) +{ +	copy_to_bdaddr(ip6_daddr, addr); + +	/* We need to toggle the U/L bit that we got from IPv6 address +	 * so that we get the proper address and type of the BD address. +	 */ +	addr->b[5] ^= 0x02; + +	*addr_type = get_addr_type_from_eui64(addr->b[5]); +} + +static int header_create(struct sk_buff *skb, struct net_device *netdev, +		         unsigned short type, const void *_daddr, +		         const void *_saddr, unsigned int len) +{ +	struct ipv6hdr *hdr; +	struct lowpan_dev *dev; +	struct lowpan_peer *peer; +	bdaddr_t addr, *any = BDADDR_ANY; +	u8 *saddr, *daddr = any->b; +	u8 addr_type; + +	if (type != ETH_P_IPV6) +		return -EINVAL; + +	hdr = ipv6_hdr(skb); + +	dev = lowpan_dev(netdev); + +	if (ipv6_addr_is_multicast(&hdr->daddr)) { +		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, +		       sizeof(struct in6_addr)); +		lowpan_cb(skb)->conn = NULL; +	} else { +		unsigned long flags; + +		/* Get destination BT device from skb. +		 * If there is no such peer then discard the packet. +		 */ +		convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type); + +		BT_DBG("dest addr %pMR type %s IP %pI6c", &addr, +		       addr_type == ADDR_LE_DEV_PUBLIC ? 
"PUBLIC" : "RANDOM", +		       &hdr->daddr); + +		read_lock_irqsave(&devices_lock, flags); +		peer = peer_lookup_ba(dev, &addr, addr_type); +		read_unlock_irqrestore(&devices_lock, flags); + +		if (!peer) { +			BT_DBG("no such peer %pMR found", &addr); +			return -ENOENT; +		} + +		daddr = peer->eui64_addr; + +		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, +		       sizeof(struct in6_addr)); +		lowpan_cb(skb)->conn = peer->conn; +	} + +	saddr = dev->netdev->dev_addr; + +	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len); +} + +/* Packet to BT LE device */ +static int send_pkt(struct l2cap_conn *conn, const void *saddr, +		    const void *daddr, struct sk_buff *skb, +		    struct net_device *netdev) +{ +	raw_dump_table(__func__, "raw skb data dump before fragmentation", +		       skb->data, skb->len); + +	return conn_send(conn, skb->data, skb->len, 0, netdev); +} + +static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) +{ +	struct sk_buff *local_skb; +	struct lowpan_dev *entry, *tmp; +	unsigned long flags; + +	read_lock_irqsave(&devices_lock, flags); + +	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { +		struct lowpan_peer *pentry, *ptmp; +		struct lowpan_dev *dev; + +		if (entry->netdev != netdev) +			continue; + +		dev = lowpan_dev(entry->netdev); + +		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { +			local_skb = skb_clone(skb, GFP_ATOMIC); + +			send_pkt(pentry->conn, netdev->dev_addr, +				 pentry->eui64_addr, local_skb, netdev); + +			kfree_skb(local_skb); +		} +	} + +	read_unlock_irqrestore(&devices_lock, flags); +} + +static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) +{ +	int err = 0; +	unsigned char *eui64_addr; +	struct lowpan_dev *dev; +	struct lowpan_peer *peer; +	bdaddr_t addr; +	u8 addr_type; + +	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) { +		/* We need to send the packet to every device +		 * behind this interface. +		 */ +		send_mcast_pkt(skb, netdev); +	} else { +		unsigned long flags; + +		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type); +		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8; +		dev = lowpan_dev(netdev); + +		read_lock_irqsave(&devices_lock, flags); +		peer = peer_lookup_ba(dev, &addr, addr_type); +		read_unlock_irqrestore(&devices_lock, flags); + +		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p", +		       netdev->name, &addr, +		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM", +		       &lowpan_cb(skb)->addr, peer); + +		if (peer && peer->conn) +			err = send_pkt(peer->conn, netdev->dev_addr, +				       eui64_addr, skb, netdev); +	} +	dev_kfree_skb(skb); + +	if (err) +		BT_DBG("ERROR: xmit failed (%d)", err); + +	return (err < 0) ? 
NET_XMIT_DROP : err; +} + +static const struct net_device_ops netdev_ops = { +	.ndo_start_xmit		= bt_xmit, +}; + +static struct header_ops header_ops = { +	.create	= header_create, +}; + +static void netdev_setup(struct net_device *dev) +{ +	dev->addr_len		= EUI64_ADDR_LEN; +	dev->type		= ARPHRD_6LOWPAN; + +	dev->hard_header_len	= 0; +	dev->needed_tailroom	= 0; +	dev->mtu		= IPV6_MIN_MTU; +	dev->tx_queue_len	= 0; +	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT; +	dev->watchdog_timeo	= 0; + +	dev->netdev_ops		= &netdev_ops; +	dev->header_ops		= &header_ops; +	dev->destructor		= free_netdev; +} + +static struct device_type bt_type = { +	.name	= "bluetooth", +}; + +static void set_addr(u8 *eui, u8 *addr, u8 addr_type) +{ +	/* addr is the BT address in little-endian format */ +	eui[0] = addr[5]; +	eui[1] = addr[4]; +	eui[2] = addr[3]; +	eui[3] = 0xFF; +	eui[4] = 0xFE; +	eui[5] = addr[2]; +	eui[6] = addr[1]; +	eui[7] = addr[0]; + +	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */ +	if (addr_type == ADDR_LE_DEV_PUBLIC) +		eui[0] &= ~0x02; +	else +		eui[0] |= 0x02; + +	BT_DBG("type %d addr %*phC", addr_type, 8, eui); +} + +static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr, +		         u8 addr_type) +{ +	netdev->addr_assign_type = NET_ADDR_PERM; +	set_addr(netdev->dev_addr, addr->b, addr_type); +} + +static void ifup(struct net_device *netdev) +{ +	int err; + +	rtnl_lock(); +	err = dev_open(netdev); +	if (err < 0) +		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err); +	rtnl_unlock(); +} + +static void do_notify_peers(struct work_struct *work) +{ +	struct lowpan_dev *dev = container_of(work, struct lowpan_dev, +					      notify_peers.work); + +	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */ +} + +static bool is_bt_6lowpan(struct hci_conn *hcon) +{ +	if (hcon->type != LE_LINK) +		return false; + +	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags); +} + +static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev) +{ +	struct lowpan_peer *peer; +	unsigned long flags; + +	peer = kzalloc(sizeof(*peer), GFP_ATOMIC); +	if (!peer) +		return -ENOMEM; + +	peer->conn = conn; +	memset(&peer->peer_addr, 0, sizeof(struct in6_addr)); + +	/* RFC 2464 ch. 5 */ +	peer->peer_addr.s6_addr[0] = 0xFE; +	peer->peer_addr.s6_addr[1] = 0x80; +	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b, +	         conn->hcon->dst_type); + +	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8, +	       EUI64_ADDR_LEN); + +	write_lock_irqsave(&devices_lock, flags); +	INIT_LIST_HEAD(&peer->list); +	peer_add(dev, peer); +	write_unlock_irqrestore(&devices_lock, flags); + +	/* Notifying peers about us needs to be done without locks held */ +	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); +	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); + +	return 0; +} + +/* This gets called when BT LE 6LoWPAN device is connected. We then + * create network device that acts as a proxy between BT LE device + * and kernel network stack. 
+ */ +int bt_6lowpan_add_conn(struct l2cap_conn *conn) +{ +	struct lowpan_peer *peer = NULL; +	struct lowpan_dev *dev; +	struct net_device *netdev; +	int err = 0; +	unsigned long flags; + +	if (!is_bt_6lowpan(conn->hcon)) +		return 0; + +	peer = lookup_peer(conn); +	if (peer) +		return -EEXIST; + +	dev = lookup_dev(conn); +	if (dev) +		return add_peer_conn(conn, dev); + +	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup); +	if (!netdev) +		return -ENOMEM; + +	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type); + +	netdev->netdev_ops = &netdev_ops; +	SET_NETDEV_DEV(netdev, &conn->hcon->dev); +	SET_NETDEV_DEVTYPE(netdev, &bt_type); + +	err = register_netdev(netdev); +	if (err < 0) { +		BT_INFO("register_netdev failed %d", err); +		free_netdev(netdev); +		goto out; +	} + +	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR", +	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src); +	set_bit(__LINK_STATE_PRESENT, &netdev->state); + +	dev = netdev_priv(netdev); +	dev->netdev = netdev; +	dev->hdev = conn->hcon->hdev; +	INIT_LIST_HEAD(&dev->peers); + +	write_lock_irqsave(&devices_lock, flags); +	INIT_LIST_HEAD(&dev->list); +	list_add(&dev->list, &bt_6lowpan_devices); +	write_unlock_irqrestore(&devices_lock, flags); + +	ifup(netdev); + +	return add_peer_conn(conn, dev); + +out: +	return err; +} + +static void delete_netdev(struct work_struct *work) +{ +	struct lowpan_dev *entry = container_of(work, struct lowpan_dev, +						delete_netdev); + +	unregister_netdev(entry->netdev); + +	/* The entry pointer is deleted in device_event() */ +} + +int bt_6lowpan_del_conn(struct l2cap_conn *conn) +{ +	struct lowpan_dev *entry, *tmp; +	struct lowpan_dev *dev = NULL; +	struct lowpan_peer *peer; +	int err = -ENOENT; +	unsigned long flags; +	bool last = false; + +	if (!conn || !is_bt_6lowpan(conn->hcon)) +		return 0; + +	write_lock_irqsave(&devices_lock, flags); + +	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { +		dev = lowpan_dev(entry->netdev); +		peer = peer_lookup_conn(dev, conn); +		if (peer) { +			last = peer_del(dev, peer); +			err = 0; +			break; +		} +	} + +	if (!err && last && dev && !atomic_read(&dev->peer_count)) { +		write_unlock_irqrestore(&devices_lock, flags); + +		cancel_delayed_work_sync(&dev->notify_peers); + +		/* bt_6lowpan_del_conn() is called with hci dev lock held which +		 * means that we must delete the netdevice in worker thread. 
+		 */ +		INIT_WORK(&entry->delete_netdev, delete_netdev); +		schedule_work(&entry->delete_netdev); +	} else { +		write_unlock_irqrestore(&devices_lock, flags); +	} + +	return err; +} + +static int device_event(struct notifier_block *unused, +			unsigned long event, void *ptr) +{ +	struct net_device *netdev = netdev_notifier_info_to_dev(ptr); +	struct lowpan_dev *entry, *tmp; +	unsigned long flags; + +	if (netdev->type != ARPHRD_6LOWPAN) +		return NOTIFY_DONE; + +	switch (event) { +	case NETDEV_UNREGISTER: +		write_lock_irqsave(&devices_lock, flags); +		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, +					 list) { +			if (entry->netdev == netdev) { +				list_del(&entry->list); +				kfree(entry); +				break; +			} +		} +		write_unlock_irqrestore(&devices_lock, flags); +		break; +	} + +	return NOTIFY_DONE; +} + +static struct notifier_block bt_6lowpan_dev_notifier = { +	.notifier_call = device_event, +}; + +int bt_6lowpan_init(void) +{ +	return register_netdevice_notifier(&bt_6lowpan_dev_notifier); +} + +void bt_6lowpan_cleanup(void) +{ +	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier); +} diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h new file mode 100644 index 00000000000..5d281f1eaf5 --- /dev/null +++ b/net/bluetooth/6lowpan.h @@ -0,0 +1,47 @@ +/* +   Copyright (c) 2013 Intel Corp. + +   This program is free software; you can redistribute it and/or modify +   it under the terms of the GNU General Public License version 2 and +   only version 2 as published by the Free Software Foundation. + +   This program is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +   GNU General Public License for more details. +*/ + +#ifndef __6LOWPAN_H +#define __6LOWPAN_H + +#include <linux/errno.h> +#include <linux/skbuff.h> +#include <net/bluetooth/l2cap.h> + +#if IS_ENABLED(CONFIG_BT_6LOWPAN) +int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb); +int bt_6lowpan_add_conn(struct l2cap_conn *conn); +int bt_6lowpan_del_conn(struct l2cap_conn *conn); +int bt_6lowpan_init(void); +void bt_6lowpan_cleanup(void); +#else +static int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb) +{ +	return -EOPNOTSUPP; +} +static int bt_6lowpan_add_conn(struct l2cap_conn *conn) +{ +	return -EOPNOTSUPP; +} +int bt_6lowpan_del_conn(struct l2cap_conn *conn) +{ +	return -EOPNOTSUPP; +} +static int bt_6lowpan_init(void) +{ +	return -EOPNOTSUPP; +} +static void bt_6lowpan_cleanup(void) { } +#endif + +#endif /* __6LOWPAN_H */ diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index d3f3f7b1d32..06ec14499ca 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -6,6 +6,7 @@ menuconfig BT  	tristate "Bluetooth subsystem support"  	depends on NET && !S390  	depends on RFKILL || !RFKILL +	select 6LOWPAN_IPHC if BT_6LOWPAN  	select CRC16  	select CRYPTO  	select CRYPTO_BLKCIPHER @@ -39,6 +40,12 @@ menuconfig BT  	  to Bluetooth kernel modules are provided in the BlueZ packages.  For  	  more information, see <http://www.bluez.org/>. +config BT_6LOWPAN +	bool "Bluetooth 6LoWPAN support" +	depends on BT && IPV6 +	help +	  IPv6 compression over Bluetooth. 
+  source "net/bluetooth/rfcomm/Kconfig"  source "net/bluetooth/bnep/Kconfig" diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index dea6a287dac..ca51246b101 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -11,3 +11,6 @@ obj-$(CONFIG_BT_HIDP)	+= hidp/  bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \  	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \  	a2mp.o amp.o +bluetooth-$(CONFIG_BT_6LOWPAN) += 6lowpan.o + +subdir-ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 17f33a62f6d..9514cc9e850 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -15,8 +15,9 @@  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h>  #include <net/bluetooth/l2cap.h> -#include <net/bluetooth/a2mp.h> -#include <net/bluetooth/amp.h> + +#include "a2mp.h" +#include "amp.h"  /* Global AMP Manager list */  LIST_HEAD(amp_mgr_list); @@ -75,33 +76,26 @@ u8 __next_ident(struct amp_mgr *mgr)  	return mgr->ident;  } -static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) -{ -	cl->id = 0; -	cl->type = 0; -	cl->status = 1; -} -  /* hci_dev_list shall be locked */ -static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl) +static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)  { -	int i = 0;  	struct hci_dev *hdev; +	int i = 1; -	__a2mp_cl_bredr(cl); +	cl[0].id = AMP_ID_BREDR; +	cl[0].type = AMP_TYPE_BREDR; +	cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;  	list_for_each_entry(hdev, &hci_dev_list, list) { -		/* Iterate through AMP controllers */ -		if (hdev->id == HCI_BREDR_ID) -			continue; - -		/* Starting from second entry */ -		if (++i >= num_ctrl) -			return; - -		cl[i].id = hdev->id; -		cl[i].type = hdev->amp_type; -		cl[i].status = hdev->amp_status; +		if (hdev->dev_type == HCI_AMP) { +			cl[i].id = hdev->id; +			cl[i].type = hdev->amp_type; +			if (test_bit(HCI_UP, &hdev->flags)) +				cl[i].status = hdev->amp_status; +			else +				cl[i].status = AMP_STATUS_POWERED_DOWN; +			i++; +		}  	}  } @@ -129,6 +123,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,  	struct a2mp_discov_rsp *rsp;  	u16 ext_feat;  	u8 num_ctrl; +	struct hci_dev *hdev;  	if (len < sizeof(*req))  		return -EINVAL; @@ -152,7 +147,14 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,  	read_lock(&hci_dev_list_lock); -	num_ctrl = __hci_num_ctrl(); +	/* at minimum the BR/EDR needs to be listed */ +	num_ctrl = 1; + +	list_for_each_entry(hdev, &hci_dev_list, list) { +		if (hdev->dev_type == HCI_AMP) +			num_ctrl++; +	} +  	len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);  	rsp = kmalloc(len, GFP_ATOMIC);  	if (!rsp) { @@ -160,10 +162,10 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,  		return -ENOMEM;  	} -	rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); +	rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);  	rsp->ext_feat = 0; -	__a2mp_add_cl(mgr, rsp->cl, num_ctrl); +	__a2mp_add_cl(mgr, rsp->cl);  	read_unlock(&hci_dev_list_lock); @@ -208,7 +210,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,  		BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,  		       cl->status); -		if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) { +		if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {  			struct a2mp_info_req req;  			found = true; @@ -233,7 +235,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,  			BT_DBG("chan %p state 
%s", chan,  			       state_to_string(chan->state)); -			if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) +			if (chan->scid == L2CAP_CID_A2MP)  				continue;  			l2cap_chan_lock(chan); @@ -344,7 +346,7 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,  	tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);  	hdev = hci_dev_get(req->id); -	if (!hdev || hdev->amp_type == HCI_BREDR || tmp) { +	if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {  		struct a2mp_amp_assoc_rsp rsp;  		rsp.id = req->id; @@ -451,7 +453,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,  	rsp.remote_id = req->local_id;  	hdev = hci_dev_get(req->remote_id); -	if (!hdev || hdev->amp_type != HCI_AMP) { +	if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {  		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;  		goto send_rsp;  	} @@ -535,7 +537,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,  		goto send_rsp;  	} -	hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst); +	hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, +				       &mgr->l2cap_conn->hcon->dst);  	if (!hcon) {  		BT_ERR("No phys link exist");  		rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS; @@ -646,7 +649,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)  	if (err) {  		struct a2mp_cmd_rej rej; -		rej.reason = __constant_cpu_to_le16(0); +		rej.reason = cpu_to_le16(0);  		hdr = (void *) skb->data;  		BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); @@ -669,7 +672,8 @@ static void a2mp_chan_close_cb(struct l2cap_chan *chan)  	l2cap_chan_put(chan);  } -static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state) +static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state, +				      int err)  {  	struct amp_mgr *mgr = chan->data; @@ -691,7 +695,13 @@ static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)  static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,  					      unsigned long len, int nb)  { -	return bt_skb_alloc(len, GFP_KERNEL); +	struct sk_buff *skb; + +	skb = bt_skb_alloc(len, GFP_KERNEL); +	if (!skb) +		return ERR_PTR(-ENOMEM); + +	return skb;  }  static struct l2cap_ops a2mp_chan_ops = { @@ -706,6 +716,9 @@ static struct l2cap_ops a2mp_chan_ops = {  	.teardown = l2cap_chan_no_teardown,  	.ready = l2cap_chan_no_ready,  	.defer = l2cap_chan_no_defer, +	.resume = l2cap_chan_no_resume, +	.set_shutdown = l2cap_chan_no_set_shutdown, +	.get_sndtimeo = l2cap_chan_no_get_sndtimeo,  };  static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) @@ -719,7 +732,11 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)  	BT_DBG("chan %p", chan); -	chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP; +	chan->chan_type = L2CAP_CHAN_FIXED; +	chan->scid = L2CAP_CID_A2MP; +	chan->dcid = L2CAP_CID_A2MP; +	chan->omtu = L2CAP_A2MP_DEFAULT_MTU; +	chan->imtu = L2CAP_A2MP_DEFAULT_MTU;  	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;  	chan->ops = &a2mp_chan_ops; @@ -829,6 +846,9 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,  {  	struct amp_mgr *mgr; +	if (conn->hcon->type != ACL_LINK) +		return NULL; +  	mgr = amp_mgr_create(conn, false);  	if (!mgr) {  		BT_ERR("Could not create AMP manager"); @@ -871,7 +891,7 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev)  	rsp.id = hdev->id;  	rsp.status = A2MP_STATUS_INVALID_CTRL_ID; -	if (hdev->amp_type != HCI_BREDR) { +	if (hdev->amp_type != AMP_TYPE_BREDR) {  		rsp.status 
= 0;  		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);  		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h new file mode 100644 index 00000000000..487b54c1308 --- /dev/null +++ b/net/bluetooth/a2mp.h @@ -0,0 +1,150 @@ +/* +   Copyright (c) 2010,2011 Code Aurora Forum.  All rights reserved. +   Copyright (c) 2011,2012 Intel Corp. + +   This program is free software; you can redistribute it and/or modify +   it under the terms of the GNU General Public License version 2 and +   only version 2 as published by the Free Software Foundation. + +   This program is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +   GNU General Public License for more details. +*/ + +#ifndef __A2MP_H +#define __A2MP_H + +#include <net/bluetooth/l2cap.h> + +#define A2MP_FEAT_EXT	0x8000 + +enum amp_mgr_state { +	READ_LOC_AMP_INFO, +	READ_LOC_AMP_ASSOC, +	READ_LOC_AMP_ASSOC_FINAL, +	WRITE_REMOTE_AMP_ASSOC, +}; + +struct amp_mgr { +	struct list_head	list; +	struct l2cap_conn	*l2cap_conn; +	struct l2cap_chan	*a2mp_chan; +	struct l2cap_chan	*bredr_chan; +	struct kref		kref; +	__u8			ident; +	__u8			handle; +	unsigned long		state; +	unsigned long		flags; + +	struct list_head	amp_ctrls; +	struct mutex		amp_ctrls_lock; +}; + +struct a2mp_cmd { +	__u8	code; +	__u8	ident; +	__le16	len; +	__u8	data[0]; +} __packed; + +/* A2MP command codes */ +#define A2MP_COMMAND_REJ         0x01 +struct a2mp_cmd_rej { +	__le16	reason; +	__u8	data[0]; +} __packed; + +#define A2MP_DISCOVER_REQ        0x02 +struct a2mp_discov_req { +	__le16	mtu; +	__le16	ext_feat; +} __packed; + +struct a2mp_cl { +	__u8	id; +	__u8	type; +	__u8	status; +} __packed; + +#define A2MP_DISCOVER_RSP        0x03 +struct a2mp_discov_rsp { +	__le16     mtu; +	__le16     ext_feat; +	struct a2mp_cl cl[0]; +} __packed; + +#define A2MP_CHANGE_NOTIFY       0x04 +#define A2MP_CHANGE_RSP          0x05 + +#define A2MP_GETINFO_REQ         0x06 +struct a2mp_info_req { +	__u8       id; +} __packed; + +#define A2MP_GETINFO_RSP         0x07 +struct a2mp_info_rsp { +	__u8	id; +	__u8	status; +	__le32	total_bw; +	__le32	max_bw; +	__le32	min_latency; +	__le16	pal_cap; +	__le16	assoc_size; +} __packed; + +#define A2MP_GETAMPASSOC_REQ     0x08 +struct a2mp_amp_assoc_req { +	__u8	id; +} __packed; + +#define A2MP_GETAMPASSOC_RSP     0x09 +struct a2mp_amp_assoc_rsp { +	__u8	id; +	__u8	status; +	__u8	amp_assoc[0]; +} __packed; + +#define A2MP_CREATEPHYSLINK_REQ  0x0A +#define A2MP_DISCONNPHYSLINK_REQ 0x0C +struct a2mp_physlink_req { +	__u8	local_id; +	__u8	remote_id; +	__u8	amp_assoc[0]; +} __packed; + +#define A2MP_CREATEPHYSLINK_RSP  0x0B +#define A2MP_DISCONNPHYSLINK_RSP 0x0D +struct a2mp_physlink_rsp { +	__u8	local_id; +	__u8	remote_id; +	__u8	status; +} __packed; + +/* A2MP response status */ +#define A2MP_STATUS_SUCCESS			0x00 +#define A2MP_STATUS_INVALID_CTRL_ID		0x01 +#define A2MP_STATUS_UNABLE_START_LINK_CREATION	0x02 +#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS	0x02 +#define A2MP_STATUS_COLLISION_OCCURED		0x03 +#define A2MP_STATUS_DISCONN_REQ_RECVD		0x04 +#define A2MP_STATUS_PHYS_LINK_EXISTS		0x05 +#define A2MP_STATUS_SECURITY_VIOLATION		0x06 + +extern struct list_head amp_mgr_list; +extern struct mutex amp_mgr_list_lock; + +struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr); +int amp_mgr_put(struct amp_mgr *mgr); +u8 __next_ident(struct amp_mgr *mgr); +struct l2cap_chan *a2mp_channel_create(struct 
l2cap_conn *conn, +				       struct sk_buff *skb); +struct amp_mgr *amp_mgr_lookup_by_state(u8 state); +void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data); +void a2mp_discover_amp(struct l2cap_chan *chan); +void a2mp_send_getinfo_rsp(struct hci_dev *hdev); +void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status); +void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status); +void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status); + +#endif /* __A2MP_H */ diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 9096137c889..2021c481cdb 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -25,12 +25,13 @@  /* Bluetooth address family and sockets. */  #include <linux/module.h> +#include <linux/debugfs.h>  #include <asm/ioctls.h>  #include <net/bluetooth/bluetooth.h>  #include <linux/proc_fs.h> -#define VERSION "2.16" +#define VERSION "2.19"  /* Bluetooth sockets */  #define BT_MAX_PROTO	8 @@ -221,12 +222,11 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (flags & (MSG_OOB))  		return -EOPNOTSUPP; -	msg->msg_namelen = 0; -  	skb = skb_recv_datagram(sk, flags, noblock, &err);  	if (!skb) {  		if (sk->sk_shutdown & RCV_SHUTDOWN)  			return 0; +  		return err;  	} @@ -238,9 +238,14 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	skb_reset_transport_header(skb);  	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); -	if (err == 0) +	if (err == 0) {  		sock_recv_ts_and_drops(msg, sk, skb); +		if (bt_sk(sk)->skb_msg_name) +			bt_sk(sk)->skb_msg_name(skb, msg->msg_name, +						&msg->msg_namelen); +	} +  	skb_free_datagram(sk, skb);  	return err ? : copied; @@ -287,8 +292,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (flags & MSG_OOB)  		return -EOPNOTSUPP; -	msg->msg_namelen = 0; -  	BT_DBG("sk %p size %zu", sk, size);  	lock_sock(sk); @@ -490,6 +493,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)  }  EXPORT_SYMBOL(bt_sock_ioctl); +/* This function expects the sk lock to be held when called */  int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)  {  	DECLARE_WAITQUEUE(wait, current); @@ -525,6 +529,46 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)  }  EXPORT_SYMBOL(bt_sock_wait_state); +/* This function expects the sk lock to be held when called */ +int bt_sock_wait_ready(struct sock *sk, unsigned long flags) +{ +	DECLARE_WAITQUEUE(wait, current); +	unsigned long timeo; +	int err = 0; + +	BT_DBG("sk %p", sk); + +	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); + +	add_wait_queue(sk_sleep(sk), &wait); +	set_current_state(TASK_INTERRUPTIBLE); +	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) { +		if (!timeo) { +			err = -EAGAIN; +			break; +		} + +		if (signal_pending(current)) { +			err = sock_intr_errno(timeo); +			break; +		} + +		release_sock(sk); +		timeo = schedule_timeout(timeo); +		lock_sock(sk); +		set_current_state(TASK_INTERRUPTIBLE); + +		err = sock_error(sk); +		if (err) +			break; +	} +	__set_current_state(TASK_RUNNING); +	remove_wait_queue(sk_sleep(sk), &wait); + +	return err; +} +EXPORT_SYMBOL(bt_sock_wait_ready); +  #ifdef CONFIG_PROC_FS  struct bt_seq_state {  	struct bt_sock_list *l; @@ -563,7 +607,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)  	struct bt_sock_list *l = s->l;  	if (v == SEQ_START_TOKEN) { -		seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent"); +		seq_puts(seq 
,"sk               RefCnt Rmem   Wmem   User   Inode  Parent");  		if (l->custom_seq_show) {  			seq_putc(seq, ' '); @@ -576,15 +620,13 @@ static int bt_seq_show(struct seq_file *seq, void *v)  		struct bt_sock *bt = bt_sk(sk);  		seq_printf(seq, -			   "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu", +			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",  			   sk,  			   atomic_read(&sk->sk_refcnt),  			   sk_rmem_alloc_get(sk),  			   sk_wmem_alloc_get(sk),  			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),  			   sock_i_ino(sk), -			   &bt->src, -			   &bt->dst,  			   bt->parent? sock_i_ino(bt->parent): 0LU);  		if (l->custom_seq_show) { @@ -662,12 +704,17 @@ static struct net_proto_family bt_sock_family_ops = {  	.create	= bt_sock_create,  }; +struct dentry *bt_debugfs; +EXPORT_SYMBOL_GPL(bt_debugfs); +  static int __init bt_init(void)  {  	int err;  	BT_INFO("Core ver %s", VERSION); +	bt_debugfs = debugfs_create_dir("bluetooth", NULL); +  	err = bt_sysfs_init();  	if (err < 0)  		return err; @@ -708,7 +755,6 @@ error:  static void __exit bt_exit(void)  { -  	sco_exit();  	l2cap_exit(); @@ -718,6 +764,8 @@ static void __exit bt_exit(void)  	sock_unregister(PF_BLUETOOTH);  	bt_sysfs_cleanup(); + +	debugfs_remove_recursive(bt_debugfs);  }  subsys_initcall(bt_init); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index d459ed43c77..bb39509b3f0 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -14,10 +14,11 @@  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci.h>  #include <net/bluetooth/hci_core.h> -#include <net/bluetooth/a2mp.h> -#include <net/bluetooth/amp.h>  #include <crypto/hash.h> +#include "a2mp.h" +#include "amp.h" +  /* Remote AMP Controllers interface */  void amp_ctrl_get(struct amp_ctrl *ctrl)  { @@ -110,7 +111,7 @@ static u8 __next_handle(struct amp_mgr *mgr)  struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,  			     u8 remote_id, bool out)  { -	bdaddr_t *dst = mgr->l2cap_conn->dst; +	bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;  	struct hci_conn *hcon;  	hcon = hci_conn_add(hdev, AMP_LINK, dst); @@ -409,7 +410,8 @@ void amp_create_logical_link(struct l2cap_chan *chan)  	struct hci_cp_create_accept_logical_link cp;  	struct hci_dev *hdev; -	BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst); +	BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, +	       &chan->conn->hcon->dst);  	if (!hs_hcon)  		return; diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h new file mode 100644 index 00000000000..7ea3db77ba8 --- /dev/null +++ b/net/bluetooth/amp.h @@ -0,0 +1,54 @@ +/* +   Copyright (c) 2011,2012 Intel Corp. + +   This program is free software; you can redistribute it and/or modify +   it under the terms of the GNU General Public License version 2 and +   only version 2 as published by the Free Software Foundation. + +   This program is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +   GNU General Public License for more details. 
+*/ + +#ifndef __AMP_H +#define __AMP_H + +struct amp_ctrl { +	struct list_head	list; +	struct kref		kref; +	__u8			id; +	__u16			assoc_len_so_far; +	__u16			assoc_rem_len; +	__u16			assoc_len; +	__u8			*assoc; +}; + +int amp_ctrl_put(struct amp_ctrl *ctrl); +void amp_ctrl_get(struct amp_ctrl *ctrl); +struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id); +struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id); +void amp_ctrl_list_flush(struct amp_mgr *mgr); + +struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, +			     u8 remote_id, bool out); + +int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type); + +void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr); +void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle); +void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr); +void amp_read_loc_assoc_final_data(struct hci_dev *hdev, +				   struct hci_conn *hcon); +void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, +			struct hci_conn *hcon); +void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, +			struct hci_conn *hcon); +void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle); +void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle); +void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon); +void amp_create_logical_link(struct l2cap_chan *chan); +void amp_disconnect_logical_link(struct hci_chan *hchan); +void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason); + +#endif /* __AMP_H */ diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index e7ee5314f39..5a5b16f365e 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h @@ -12,8 +12,7 @@    GNU General Public License for more details.    You should have received a copy of the GNU General Public License -  along with this program; if not, write to the Free Software -  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA +  along with this program; if not, see <http://www.gnu.org/licenses/>.  */  #ifndef _BNEP_H diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index e430b1abcd2..a841d3e776c 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -32,6 +32,7 @@  #include <asm/unaligned.h>  #include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/l2cap.h>  #include <net/bluetooth/hci_core.h>  #include "bnep.h" @@ -510,20 +511,13 @@ static int bnep_session(void *arg)  static struct device *bnep_get_device(struct bnep_session *session)  { -	bdaddr_t *src = &bt_sk(session->sock->sk)->src; -	bdaddr_t *dst = &bt_sk(session->sock->sk)->dst; -	struct hci_dev *hdev;  	struct hci_conn *conn; -	hdev = hci_get_route(dst, src); -	if (!hdev) +	conn = l2cap_pi(session->sock->sk)->chan->conn->hcon; +	if (!conn)  		return NULL; -	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); - -	hci_dev_put(hdev); - -	return conn ? 
&conn->dev : NULL; +	return &conn->dev;  }  static struct device_type bnep_type = { @@ -539,8 +533,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)  	BT_DBG(""); -	baswap((void *) dst, &bt_sk(sock->sk)->dst); -	baswap((void *) src, &bt_sk(sock->sk)->src); +	baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); +	baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);  	/* session struct allocated as private part of net_device */  	dev = alloc_netdev(sizeof(struct bnep_session), diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index e0a6ebf2baa..67fe5e84e68 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -340,20 +340,20 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)  	down_write(&cmtp_session_sem); -	s = __cmtp_get_session(&bt_sk(sock->sk)->dst); +	s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst);  	if (s && s->state == BT_CONNECTED) {  		err = -EEXIST;  		goto failed;  	} -	bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst); +	bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst);  	session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,  					l2cap_pi(sock->sk)->chan->imtu);  	BT_DBG("mtu %d", session->mtu); -	sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst); +	sprintf(session->name, "%pMR", &session->bdaddr);  	session->sock  = sock;  	session->state = BT_CONFIG; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index f0817121ec5..a7a27bc2c0b 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -28,8 +28,10 @@  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h> -#include <net/bluetooth/a2mp.h> -#include <net/bluetooth/smp.h> +#include <net/bluetooth/l2cap.h> + +#include "smp.h" +#include "a2mp.h"  struct sco_param {  	u16 pkt_type; @@ -49,30 +51,6 @@ static const struct sco_param sco_param_wideband[] = {  	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */  }; -static void hci_le_create_connection(struct hci_conn *conn) -{ -	struct hci_dev *hdev = conn->hdev; -	struct hci_cp_le_create_conn cp; - -	conn->state = BT_CONNECT; -	conn->out = true; -	conn->link_mode |= HCI_LM_MASTER; -	conn->sec_level = BT_SECURITY_LOW; - -	memset(&cp, 0, sizeof(cp)); -	cp.scan_interval = __constant_cpu_to_le16(0x0060); -	cp.scan_window = __constant_cpu_to_le16(0x0030); -	bacpy(&cp.peer_addr, &conn->dst); -	cp.peer_addr_type = conn->dst_type; -	cp.conn_interval_min = __constant_cpu_to_le16(0x0028); -	cp.conn_interval_max = __constant_cpu_to_le16(0x0038); -	cp.supervision_timeout = __constant_cpu_to_le16(0x002a); -	cp.min_ce_len = __constant_cpu_to_le16(0x0000); -	cp.max_ce_len = __constant_cpu_to_le16(0x0000); - -	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); -} -  static void hci_le_create_connection_cancel(struct hci_conn *conn)  {  	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); @@ -105,7 +83,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)  			cp.pscan_rep_mode = ie->data.pscan_rep_mode;  			cp.pscan_mode     = ie->data.pscan_mode;  			cp.clock_offset   = ie->data.clock_offset | -					    __constant_cpu_to_le16(0x8000); +					    cpu_to_le16(0x8000);  		}  		memcpy(conn->dev_class, ie->data.dev_class, 3); @@ -205,8 +183,8 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)  	cp.handle   = cpu_to_le16(handle); -	cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40); -	cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40); +	cp.tx_bandwidth   = cpu_to_le32(0x00001f40); +	
cp.rx_bandwidth   = cpu_to_le32(0x00001f40);  	cp.voice_setting  = cpu_to_le16(conn->setting);  	switch (conn->setting & SCO_AIRMODE_MASK) { @@ -248,13 +226,13 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,  	cp.conn_interval_max	= cpu_to_le16(max);  	cp.conn_latency		= cpu_to_le16(latency);  	cp.supervision_timeout	= cpu_to_le16(to_multiplier); -	cp.min_ce_len		= __constant_cpu_to_le16(0x0001); -	cp.max_ce_len		= __constant_cpu_to_le16(0x0001); +	cp.min_ce_len		= cpu_to_le16(0x0000); +	cp.max_ce_len		= cpu_to_le16(0x0000);  	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);  } -void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], +void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,  		      __u8 ltk[16])  {  	struct hci_dev *hdev = conn->hdev; @@ -265,9 +243,9 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],  	memset(&cp, 0, sizeof(cp));  	cp.handle = cpu_to_le16(conn->handle); -	memcpy(cp.ltk, ltk, sizeof(cp.ltk)); +	cp.rand = rand;  	cp.ediv = ediv; -	memcpy(cp.rand, rand, sizeof(cp.rand)); +	memcpy(cp.ltk, ltk, sizeof(cp.ltk));  	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);  } @@ -311,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)  {  	struct hci_conn *conn = container_of(work, struct hci_conn,  					     disc_work.work); +	int refcnt = atomic_read(&conn->refcnt);  	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); -	if (atomic_read(&conn->refcnt)) +	WARN_ON(refcnt < 0); + +	/* FIXME: It was observed that in pairing failed scenario, refcnt +	 * drops below 0. Probably this is because l2cap_conn_del calls +	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is +	 * dropped. After that loop hci_chan_del is called which also drops +	 * conn. For now make sure that ACL is alive if refcnt is higher then 0, +	 * otherwise drop it. 
+	 */ +	if (refcnt > 0)  		return;  	switch (conn->state) { @@ -340,8 +328,10 @@ static void hci_conn_timeout(struct work_struct *work)  }  /* Enter sniff mode */ -static void hci_conn_enter_sniff_mode(struct hci_conn *conn) +static void hci_conn_idle(struct work_struct *work)  { +	struct hci_conn *conn = container_of(work, struct hci_conn, +					     idle_work.work);  	struct hci_dev *hdev = conn->hdev;  	BT_DBG("hcon %p mode %d", conn, conn->mode); @@ -358,9 +348,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)  	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {  		struct hci_cp_sniff_subrate cp;  		cp.handle             = cpu_to_le16(conn->handle); -		cp.max_latency        = __constant_cpu_to_le16(0); -		cp.min_remote_timeout = __constant_cpu_to_le16(0); -		cp.min_local_timeout  = __constant_cpu_to_le16(0); +		cp.max_latency        = cpu_to_le16(0); +		cp.min_remote_timeout = cpu_to_le16(0); +		cp.min_local_timeout  = cpu_to_le16(0);  		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);  	} @@ -369,28 +359,43 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)  		cp.handle       = cpu_to_le16(conn->handle);  		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);  		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); -		cp.attempt      = __constant_cpu_to_le16(4); -		cp.timeout      = __constant_cpu_to_le16(1); +		cp.attempt      = cpu_to_le16(4); +		cp.timeout      = cpu_to_le16(1);  		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);  	}  } -static void hci_conn_idle(unsigned long arg) +static void hci_conn_auto_accept(struct work_struct *work)  { -	struct hci_conn *conn = (void *) arg; - -	BT_DBG("hcon %p mode %d", conn, conn->mode); +	struct hci_conn *conn = container_of(work, struct hci_conn, +					     auto_accept_work.work); -	hci_conn_enter_sniff_mode(conn); +	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), +		     &conn->dst);  } -static void hci_conn_auto_accept(unsigned long arg) +static void le_conn_timeout(struct work_struct *work)  { -	struct hci_conn *conn = (void *) arg; +	struct hci_conn *conn = container_of(work, struct hci_conn, +					     le_conn_timeout.work);  	struct hci_dev *hdev = conn->hdev; -	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), -		     &conn->dst); +	BT_DBG(""); + +	/* We could end up here due to having done directed advertising, +	 * so clean up the state if necessary. This should however only +	 * happen with broken hardware or if low duty cycle was used +	 * (which doesn't have a timeout of its own). 
+	 */ +	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { +		u8 enable = 0x00; +		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), +			     &enable); +		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); +		return; +	} + +	hci_le_create_connection_cancel(conn);  }  struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) @@ -404,6 +409,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)  		return NULL;  	bacpy(&conn->dst, dst); +	bacpy(&conn->src, &hdev->bdaddr);  	conn->hdev  = hdev;  	conn->type  = type;  	conn->mode  = HCI_CM_ACTIVE; @@ -412,6 +418,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)  	conn->io_capability = hdev->io_capability;  	conn->remote_auth = 0xff;  	conn->key_type = 0xff; +	conn->tx_power = HCI_TX_POWER_INVALID; +	conn->max_tx_power = HCI_TX_POWER_INVALID;  	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);  	conn->disc_timeout = HCI_DISCONN_TIMEOUT; @@ -420,6 +428,10 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)  	case ACL_LINK:  		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;  		break; +	case LE_LINK: +		/* conn->src should reflect the local identity address */ +		hci_copy_identity_address(hdev, &conn->src, &conn->src_type); +		break;  	case SCO_LINK:  		if (lmp_esco_capable(hdev))  			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | @@ -437,9 +449,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)  	INIT_LIST_HEAD(&conn->chan_list);  	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); -	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); -	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, -		    (unsigned long) conn); +	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); +	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle); +	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);  	atomic_set(&conn->refcnt, 0); @@ -460,11 +472,9 @@ int hci_conn_del(struct hci_conn *conn)  	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); -	del_timer(&conn->idle_timer); -  	cancel_delayed_work_sync(&conn->disc_work); - -	del_timer(&conn->auto_accept_timer); +	cancel_delayed_work_sync(&conn->auto_accept_work); +	cancel_delayed_work_sync(&conn->idle_work);  	if (conn->type == ACL_LINK) {  		struct hci_conn *sco = conn->link; @@ -474,6 +484,8 @@ int hci_conn_del(struct hci_conn *conn)  		/* Unacked frames */  		hdev->acl_cnt += conn->sent;  	} else if (conn->type == LE_LINK) { +		cancel_delayed_work_sync(&conn->le_conn_timeout); +  		if (hdev->le_pkts)  			hdev->le_cnt += conn->sent;  		else @@ -518,6 +530,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)  	list_for_each_entry(d, &hci_dev_list, list) {  		if (!test_bit(HCI_UP, &d->flags) ||  		    test_bit(HCI_RAW, &d->flags) || +		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||  		    d->dev_type != HCI_BREDR)  			continue; @@ -545,41 +558,235 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)  }  EXPORT_SYMBOL(hci_get_route); -static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, -				    u8 dst_type, u8 sec_level, u8 auth_type) +/* This function requires the caller holds hdev->lock */ +void hci_le_conn_failed(struct hci_conn *conn, u8 status)  { -	struct hci_conn *le; +	struct hci_dev *hdev = conn->hdev; -	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags)) -		return ERR_PTR(-ENOTSUPP); +	conn->state = BT_CLOSED; -	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, 
dst); -	if (!le) { -		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); -		if (le) -			return ERR_PTR(-EBUSY); +	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type, +			    status); -		le = hci_conn_add(hdev, LE_LINK, dst); -		if (!le) -			return ERR_PTR(-ENOMEM); +	hci_proto_connect_cfm(conn, status); + +	hci_conn_del(conn); + +	/* Since we may have temporarily stopped the background scanning in +	 * favor of connection establishment, we should restart it. +	 */ +	hci_update_background_scan(hdev); + +	/* Re-enable advertising in case this was a failed connection +	 * attempt as a peripheral. +	 */ +	mgmt_reenable_advertising(hdev); +} + +static void create_le_conn_complete(struct hci_dev *hdev, u8 status) +{ +	struct hci_conn *conn; + +	if (status == 0) +		return; + +	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x", +	       status); + +	hci_dev_lock(hdev); + +	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); +	if (!conn) +		goto done; + +	hci_le_conn_failed(conn, status); + +done: +	hci_dev_unlock(hdev); +} + +static void hci_req_add_le_create_conn(struct hci_request *req, +				       struct hci_conn *conn) +{ +	struct hci_cp_le_create_conn cp; +	struct hci_dev *hdev = conn->hdev; +	u8 own_addr_type; + +	memset(&cp, 0, sizeof(cp)); + +	/* Update random address, but set require_privacy to false so +	 * that we never connect with an unresolvable address. +	 */ +	if (hci_update_random_address(req, false, &own_addr_type)) +		return; -		le->dst_type = bdaddr_to_le(dst_type); -		hci_le_create_connection(le); +	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); +	cp.scan_window = cpu_to_le16(hdev->le_scan_window); +	bacpy(&cp.peer_addr, &conn->dst); +	cp.peer_addr_type = conn->dst_type; +	cp.own_address_type = own_addr_type; +	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); +	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); +	cp.supervision_timeout = cpu_to_le16(0x002a); +	cp.min_ce_len = cpu_to_le16(0x0000); +	cp.max_ce_len = cpu_to_le16(0x0000); + +	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); + +	conn->state = BT_CONNECT; +} + +static void hci_req_directed_advertising(struct hci_request *req, +					 struct hci_conn *conn) +{ +	struct hci_dev *hdev = req->hdev; +	struct hci_cp_le_set_adv_param cp; +	u8 own_addr_type; +	u8 enable; + +	enable = 0x00; +	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); + +	/* Clear the HCI_ADVERTISING bit temporarily so that the +	 * hci_update_random_address knows that it's safe to go ahead +	 * and write a new random address. The flag will be set back on +	 * as soon as the SET_ADV_ENABLE HCI command completes. +	 */ +	clear_bit(HCI_ADVERTISING, &hdev->dev_flags); + +	/* Set require_privacy to false so that the remote device has a +	 * chance of identifying us. 
+	 */ +	if (hci_update_random_address(req, false, &own_addr_type) < 0) +		return; + +	memset(&cp, 0, sizeof(cp)); +	cp.type = LE_ADV_DIRECT_IND; +	cp.own_address_type = own_addr_type; +	cp.direct_addr_type = conn->dst_type; +	bacpy(&cp.direct_addr, &conn->dst); +	cp.channel_map = hdev->le_adv_channel_map; + +	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); + +	enable = 0x01; +	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); + +	conn->state = BT_CONNECT; +} + +struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, +				u8 dst_type, u8 sec_level, u8 auth_type) +{ +	struct hci_conn_params *params; +	struct hci_conn *conn; +	struct smp_irk *irk; +	struct hci_request req; +	int err; + +	/* Some devices send ATT messages as soon as the physical link is +	 * established. To be able to handle these ATT messages, the user- +	 * space first establishes the connection and then starts the pairing +	 * process. +	 * +	 * So if a hci_conn object already exists for the following connection +	 * attempt, we simply update pending_sec_level and auth_type fields +	 * and return the object found. +	 */ +	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); +	if (conn) { +		conn->pending_sec_level = sec_level; +		conn->auth_type = auth_type; +		goto done; +	} + +	/* Since the controller supports only one LE connection attempt at a +	 * time, we return -EBUSY if there is any connection attempt running. +	 */ +	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); +	if (conn) +		return ERR_PTR(-EBUSY); + +	/* When given an identity address with existing identity +	 * resolving key, the connection needs to be established +	 * to a resolvable random address. +	 * +	 * This uses the cached random resolvable address from +	 * a previous scan. When no cached address is available, +	 * try connecting to the identity address instead. +	 * +	 * Storing the resolvable random address is required here +	 * to handle connection failures. The address will later +	 * be resolved back into the original identity address +	 * from the connect request. +	 */ +	irk = hci_find_irk_by_addr(hdev, dst, dst_type); +	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { +		dst = &irk->rpa; +		dst_type = ADDR_LE_DEV_RANDOM;  	} -	le->pending_sec_level = sec_level; -	le->auth_type = auth_type; +	conn = hci_conn_add(hdev, LE_LINK, dst); +	if (!conn) +		return ERR_PTR(-ENOMEM); + +	conn->dst_type = dst_type; +	conn->sec_level = BT_SECURITY_LOW; +	conn->pending_sec_level = sec_level; +	conn->auth_type = auth_type; -	hci_conn_hold(le); +	hci_req_init(&req, hdev); -	return le; +	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { +		hci_req_directed_advertising(&req, conn); +		goto create_conn; +	} + +	conn->out = true; +	conn->link_mode |= HCI_LM_MASTER; + +	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); +	if (params) { +		conn->le_conn_min_interval = params->conn_min_interval; +		conn->le_conn_max_interval = params->conn_max_interval; +	} else { +		conn->le_conn_min_interval = hdev->le_conn_min_interval; +		conn->le_conn_max_interval = hdev->le_conn_max_interval; +	} + +	/* If controller is scanning, we stop it since some controllers are +	 * not able to scan and connect at the same time. Also set the +	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete +	 * handler for scan disabling knows to set the correct discovery +	 * state. 
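hci_connect_le() above prefers a cached resolvable random address (RPA) when an identity resolving key is known for the peer, and otherwise falls back to the identity address. A minimal standalone sketch of that lookup, with invented types in place of the kernel's smp_irk and bacmp():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct irk_entry {
	uint8_t identity[6];	/* peer identity address */
	uint8_t rpa[6];		/* last resolvable random address seen, all zero if none */
};

/* Return the cached RPA for an identity address, or NULL to fall back
 * to connecting to the identity address directly. */
static const uint8_t *cached_rpa(const struct irk_entry *tbl, size_t n,
				 const uint8_t identity[6])
{
	static const uint8_t any[6];

	for (size_t i = 0; i < n; i++) {
		if (memcmp(tbl[i].identity, identity, 6))
			continue;
		if (memcmp(tbl[i].rpa, any, 6))
			return tbl[i].rpa;	/* use the cached RPA */
		return NULL;			/* IRK known but no RPA cached */
	}
	return NULL;
}

int main(void)
{
	struct irk_entry tbl[] = {
		{ { 1, 2, 3, 4, 5, 6 }, { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x4f } },
	};
	uint8_t id[6] = { 1, 2, 3, 4, 5, 6 };
	const uint8_t *rpa = cached_rpa(tbl, 1, id);

	printf(rpa ? "connect to cached RPA\n" : "connect to identity address\n");
	return 0;
}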
+	 */ +	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { +		hci_req_add_le_scan_disable(&req); +		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags); +	} + +	hci_req_add_le_create_conn(&req, conn); + +create_conn: +	err = hci_req_run(&req, create_le_conn_complete); +	if (err) { +		hci_conn_del(conn); +		return ERR_PTR(err); +	} + +done: +	hci_conn_hold(conn); +	return conn;  } -static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, -						u8 sec_level, u8 auth_type) +struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, +				 u8 sec_level, u8 auth_type)  {  	struct hci_conn *acl; +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +		return ERR_PTR(-ENOTSUPP); +  	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);  	if (!acl) {  		acl = hci_conn_add(hdev, ACL_LINK, dst); @@ -642,27 +849,22 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,  	return sco;  } -/* Create SCO, ACL or LE connection. */ -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, -			     __u8 dst_type, __u8 sec_level, __u8 auth_type) -{ -	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type); - -	switch (type) { -	case LE_LINK: -		return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type); -	case ACL_LINK: -		return hci_connect_acl(hdev, dst, sec_level, auth_type); -	} - -	return ERR_PTR(-EINVAL); -} -  /* Check link security requirement */  int hci_conn_check_link_mode(struct hci_conn *conn)  {  	BT_DBG("hcon %p", conn); +	/* In Secure Connections Only mode, it is required that Secure +	 * Connections is used and the link is encrypted with AES-CCM +	 * using a P-256 authenticated combination key. +	 */ +	if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) { +		if (!hci_conn_sc_enabled(conn) || +		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) || +		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256) +			return 0; +	} +  	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))  		return 0; @@ -690,14 +892,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)  	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {  		struct hci_cp_auth_requested cp; -		/* encrypt must be pending if auth is also pending */ -		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); -  		cp.handle = cpu_to_le16(conn->handle);  		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,  			     sizeof(cp), &cp); -		if (conn->key_type != 0xff) + +		/* If we're already encrypted set the REAUTH_PEND flag, +		 * otherwise set the ENCRYPT_PEND. +		 */ +		if (conn->link_mode & HCI_LM_ENCRYPT)  			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); +		else +			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);  	}  	return 0; @@ -738,14 +943,23 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)  	if (!(conn->link_mode & HCI_LM_AUTH))  		goto auth; -	/* An authenticated combination key has sufficient security for any -	   security level. */ -	if (conn->key_type == HCI_LK_AUTH_COMBINATION) +	/* An authenticated FIPS approved combination key has sufficient +	 * security for security level 4. */ +	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 && +	    sec_level == BT_SECURITY_FIPS) +		goto encrypt; + +	/* An authenticated combination key has sufficient security for +	   security level 3. 
*/ +	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 || +	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) && +	    sec_level == BT_SECURITY_HIGH)  		goto encrypt;  	/* An unauthenticated combination key has sufficient security for  	   security level 1 and 2. */ -	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && +	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 || +	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&  	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))  		goto encrypt; @@ -754,7 +968,8 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)  	   is generated using maximum PIN code length (16).  	   For pre 2.1 units. */  	if (conn->key_type == HCI_LK_COMBINATION && -	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16)) +	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW || +	     conn->pin_length == 16))  		goto encrypt;  auth: @@ -778,13 +993,17 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)  {  	BT_DBG("hcon %p", conn); -	if (sec_level != BT_SECURITY_HIGH) -		return 1; /* Accept if non-secure is required */ +	/* Accept if non-secure or higher security level is required */ +	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS) +		return 1; -	if (conn->sec_level == BT_SECURITY_HIGH) +	/* Accept if secure or higher security level is already present */ +	if (conn->sec_level == BT_SECURITY_HIGH || +	    conn->sec_level == BT_SECURITY_FIPS)  		return 1; -	return 0; /* Reject not secure link */ +	/* Reject not secure link */ +	return 0;  }  EXPORT_SYMBOL(hci_conn_check_secure); @@ -846,8 +1065,8 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)  timer:  	if (hdev->idle_timeout > 0) -		mod_timer(&conn->idle_timer, -			  jiffies + msecs_to_jiffies(hdev->idle_timeout)); +		queue_delayed_work(hdev->workqueue, &conn->idle_work, +				   msecs_to_jiffies(hdev->idle_timeout));  }  /* Drop all connection on the device */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index fb7356fcfe5..0a43cce9a91 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -27,11 +27,16 @@  #include <linux/export.h>  #include <linux/idr.h> -  #include <linux/rfkill.h> +#include <linux/debugfs.h> +#include <linux/crypto.h> +#include <asm/unaligned.h>  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "smp.h"  static void hci_rx_work(struct work_struct *work);  static void hci_cmd_work(struct work_struct *work); @@ -55,6 +60,1021 @@ static void hci_notify(struct hci_dev *hdev, int event)  	hci_sock_dev_event(hdev, event);  } +/* ---- HCI debugfs entries ---- */ + +static ssize_t dut_mode_read(struct file *file, char __user *user_buf, +			     size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 
'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, +			      size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	struct sk_buff *skb; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); +	bool enable; +	int err; + +	if (!test_bit(HCI_UP, &hdev->flags)) +		return -ENETDOWN; + +	if (copy_from_user(buf, user_buf, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; +	if (strtobool(buf, &enable)) +		return -EINVAL; + +	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags)) +		return -EALREADY; + +	hci_req_lock(hdev); +	if (enable) +		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, +				     HCI_CMD_TIMEOUT); +	else +		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, +				     HCI_CMD_TIMEOUT); +	hci_req_unlock(hdev); + +	if (IS_ERR(skb)) +		return PTR_ERR(skb); + +	err = -bt_to_errno(skb->data[0]); +	kfree_skb(skb); + +	if (err < 0) +		return err; + +	change_bit(HCI_DUT_MODE, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations dut_mode_fops = { +	.open		= simple_open, +	.read		= dut_mode_read, +	.write		= dut_mode_write, +	.llseek		= default_llseek, +}; + +static int features_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	u8 p; + +	hci_dev_lock(hdev); +	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { +		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x " +			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p, +			   hdev->features[p][0], hdev->features[p][1], +			   hdev->features[p][2], hdev->features[p][3], +			   hdev->features[p][4], hdev->features[p][5], +			   hdev->features[p][6], hdev->features[p][7]); +	} +	if (lmp_le_capable(hdev)) +		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x " +			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", +			   hdev->le_features[0], hdev->le_features[1], +			   hdev->le_features[2], hdev->le_features[3], +			   hdev->le_features[4], hdev->le_features[5], +			   hdev->le_features[6], hdev->le_features[7]); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int features_open(struct inode *inode, struct file *file) +{ +	return single_open(file, features_show, inode->i_private); +} + +static const struct file_operations features_fops = { +	.open		= features_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int blacklist_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	struct bdaddr_list *b; + +	hci_dev_lock(hdev); +	list_for_each_entry(b, &hdev->blacklist, list) +		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int blacklist_open(struct inode *inode, struct file *file) +{ +	return single_open(file, blacklist_show, inode->i_private); +} + +static const struct file_operations blacklist_fops = { +	.open		= blacklist_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int uuids_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	struct bt_uuid *uuid; + +	hci_dev_lock(hdev); +	list_for_each_entry(uuid, &hdev->uuids, list) { +		u8 i, val[16]; + +		/* The Bluetooth UUID values are stored in big endian, +		 * but with reversed byte order. So convert them into +		 * the right order for the %pUb modifier. 
+		 */ +		for (i = 0; i < 16; i++) +			val[i] = uuid->uuid[15 - i]; + +		seq_printf(f, "%pUb\n", val); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int uuids_open(struct inode *inode, struct file *file) +{ +	return single_open(file, uuids_show, inode->i_private); +} + +static const struct file_operations uuids_fops = { +	.open		= uuids_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int inquiry_cache_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	struct discovery_state *cache = &hdev->discovery; +	struct inquiry_entry *e; + +	hci_dev_lock(hdev); + +	list_for_each_entry(e, &cache->all, all) { +		struct inquiry_data *data = &e->data; +		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", +			   &data->bdaddr, +			   data->pscan_rep_mode, data->pscan_period_mode, +			   data->pscan_mode, data->dev_class[2], +			   data->dev_class[1], data->dev_class[0], +			   __le16_to_cpu(data->clock_offset), +			   data->rssi, data->ssp_mode, e->timestamp); +	} + +	hci_dev_unlock(hdev); + +	return 0; +} + +static int inquiry_cache_open(struct inode *inode, struct file *file) +{ +	return single_open(file, inquiry_cache_show, inode->i_private); +} + +static const struct file_operations inquiry_cache_fops = { +	.open		= inquiry_cache_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int link_keys_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct list_head *p, *n; + +	hci_dev_lock(hdev); +	list_for_each_safe(p, n, &hdev->link_keys) { +		struct link_key *key = list_entry(p, struct link_key, list); +		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type, +			   HCI_LINK_KEY_SIZE, key->val, key->pin_len); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int link_keys_open(struct inode *inode, struct file *file) +{ +	return single_open(file, link_keys_show, inode->i_private); +} + +static const struct file_operations link_keys_fops = { +	.open		= link_keys_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int dev_class_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; + +	hci_dev_lock(hdev); +	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], +		   hdev->dev_class[1], hdev->dev_class[0]); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int dev_class_open(struct inode *inode, struct file *file) +{ +	return single_open(file, dev_class_show, inode->i_private); +} + +static const struct file_operations dev_class_fops = { +	.open		= dev_class_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int voice_setting_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->voice_setting; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get, +			NULL, "0x%4.4llx\n"); + +static int auto_accept_delay_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	hdev->auto_accept_delay = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int auto_accept_delay_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->auto_accept_delay; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, +			auto_accept_delay_set, "%llu\n"); + +static int ssp_debug_mode_set(void *data, u64 val) +{ +	struct hci_dev *hdev = 
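uuids_show() above reverses each stored 128-bit UUID before printing because the values are kept with reversed byte order. A standalone illustration of the same reversal, using plain printf() instead of the kernel's %pUb modifier:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stored form: reversed byte order (illustrative value). */
	uint8_t stored[16] = {
		0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
		0x00, 0x10, 0x00, 0x00, 0x0b, 0x11, 0x00, 0x00
	};
	uint8_t val[16];

	for (int i = 0; i < 16; i++)
		val[i] = stored[15 - i];

	/* Print as a standard big-endian UUID string. */
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7],
	       val[8], val[9], val[10], val[11], val[12], val[13], val[14],
	       val[15]);
	return 0;
}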
data; +	struct sk_buff *skb; +	__u8 mode; +	int err; + +	if (val != 0 && val != 1) +		return -EINVAL; + +	if (!test_bit(HCI_UP, &hdev->flags)) +		return -ENETDOWN; + +	hci_req_lock(hdev); +	mode = val; +	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), +			     &mode, HCI_CMD_TIMEOUT); +	hci_req_unlock(hdev); + +	if (IS_ERR(skb)) +		return PTR_ERR(skb); + +	err = -bt_to_errno(skb->data[0]); +	kfree_skb(skb); + +	if (err < 0) +		return err; + +	hci_dev_lock(hdev); +	hdev->ssp_debug_mode = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int ssp_debug_mode_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->ssp_debug_mode; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get, +			ssp_debug_mode_set, "%llu\n"); + +static ssize_t force_sc_support_read(struct file *file, char __user *user_buf, +				     size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_sc_support_write(struct file *file, +				      const char __user *user_buf, +				      size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); +	bool enable; + +	if (test_bit(HCI_UP, &hdev->flags)) +		return -EBUSY; + +	if (copy_from_user(buf, user_buf, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; +	if (strtobool(buf, &enable)) +		return -EINVAL; + +	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags)) +		return -EALREADY; + +	change_bit(HCI_FORCE_SC, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations force_sc_support_fops = { +	.open		= simple_open, +	.read		= force_sc_support_read, +	.write		= force_sc_support_write, +	.llseek		= default_llseek, +}; + +static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf, +				 size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static const struct file_operations sc_only_mode_fops = { +	.open		= simple_open, +	.read		= sc_only_mode_read, +	.llseek		= default_llseek, +}; + +static int idle_timeout_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val != 0 && (val < 500 || val > 3600000)) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->idle_timeout = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int idle_timeout_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->idle_timeout; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, +			idle_timeout_set, "%llu\n"); + +static int rpa_timeout_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	/* Require the RPA timeout to be at least 30 seconds and at most +	 * 24 hours. 
+	 */ +	if (val < 30 || val > (60 * 60 * 24)) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->rpa_timeout = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int rpa_timeout_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->rpa_timeout; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, +			rpa_timeout_set, "%llu\n"); + +static int sniff_min_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val % 2 || val > hdev->sniff_max_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->sniff_min_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int sniff_min_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->sniff_min_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, +			sniff_min_interval_set, "%llu\n"); + +static int sniff_max_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val % 2 || val < hdev->sniff_min_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->sniff_max_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int sniff_max_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->sniff_max_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, +			sniff_max_interval_set, "%llu\n"); + +static int conn_info_min_age_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val > hdev->conn_info_max_age) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->conn_info_min_age = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_info_min_age_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->conn_info_min_age; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, +			conn_info_min_age_set, "%llu\n"); + +static int conn_info_max_age_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val < hdev->conn_info_min_age) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->conn_info_max_age = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_info_max_age_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->conn_info_max_age; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, +			conn_info_max_age_set, "%llu\n"); + +static int identity_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	bdaddr_t addr; +	u8 addr_type; + +	hci_dev_lock(hdev); + +	hci_copy_identity_address(hdev, &addr, &addr_type); + +	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type, +		   16, hdev->irk, &hdev->rpa); + +	hci_dev_unlock(hdev); + +	return 0; +} + +static int identity_open(struct inode *inode, struct file *file) +{ +	return single_open(file, identity_show, inode->i_private); +} + +static const struct file_operations identity_fops = { +	.open		= identity_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int random_address_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; + +	hci_dev_lock(hdev); +	seq_printf(f, "%pMR\n", &hdev->random_addr); +	
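The sniff interval and connection info age setters above validate each new value against its counterpart so the minimum can never exceed the maximum. A trivial standalone sketch of that cross-checked setter pair (plain integers instead of debugfs attributes):

#include <stdio.h>

struct bounds {
	unsigned int min;
	unsigned int max;
};

static int set_min(struct bounds *b, unsigned int val)
{
	if (val == 0 || val > b->max)
		return -1;	/* reject: would invert the range */
	b->min = val;
	return 0;
}

static int set_max(struct bounds *b, unsigned int val)
{
	if (val == 0 || val < b->min)
		return -1;
	b->max = val;
	return 0;
}

int main(void)
{
	struct bounds sniff = { .min = 80, .max = 800 };

	printf("%d\n", set_min(&sniff, 1000));	/* -1: above current max */
	printf("%d\n", set_max(&sniff, 2000));	/* 0 */
	printf("%d\n", set_min(&sniff, 1000));	/* 0: now fits */
	return 0;
}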
hci_dev_unlock(hdev); + +	return 0; +} + +static int random_address_open(struct inode *inode, struct file *file) +{ +	return single_open(file, random_address_show, inode->i_private); +} + +static const struct file_operations random_address_fops = { +	.open		= random_address_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int static_address_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; + +	hci_dev_lock(hdev); +	seq_printf(f, "%pMR\n", &hdev->static_addr); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int static_address_open(struct inode *inode, struct file *file) +{ +	return single_open(file, static_address_show, inode->i_private); +} + +static const struct file_operations static_address_fops = { +	.open		= static_address_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static ssize_t force_static_address_read(struct file *file, +					 char __user *user_buf, +					 size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_static_address_write(struct file *file, +					  const char __user *user_buf, +					  size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); +	bool enable; + +	if (test_bit(HCI_UP, &hdev->flags)) +		return -EBUSY; + +	if (copy_from_user(buf, user_buf, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; +	if (strtobool(buf, &enable)) +		return -EINVAL; + +	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags)) +		return -EALREADY; + +	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations force_static_address_fops = { +	.open		= simple_open, +	.read		= force_static_address_read, +	.write		= force_static_address_write, +	.llseek		= default_llseek, +}; + +static int white_list_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct bdaddr_list *b; + +	hci_dev_lock(hdev); +	list_for_each_entry(b, &hdev->le_white_list, list) +		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int white_list_open(struct inode *inode, struct file *file) +{ +	return single_open(file, white_list_show, inode->i_private); +} + +static const struct file_operations white_list_fops = { +	.open		= white_list_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int identity_resolving_keys_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct list_head *p, *n; + +	hci_dev_lock(hdev); +	list_for_each_safe(p, n, &hdev->identity_resolving_keys) { +		struct smp_irk *irk = list_entry(p, struct smp_irk, list); +		seq_printf(f, "%pMR (type %u) %*phN %pMR\n", +			   &irk->bdaddr, irk->addr_type, +			   16, irk->val, &irk->rpa); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int identity_resolving_keys_open(struct inode *inode, struct file *file) +{ +	return single_open(file, identity_resolving_keys_show, +			   inode->i_private); +} + +static const struct file_operations identity_resolving_keys_fops = { +	.open		= identity_resolving_keys_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int 
long_term_keys_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct list_head *p, *n; + +	hci_dev_lock(hdev); +	list_for_each_safe(p, n, &hdev->long_term_keys) { +		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list); +		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n", +			   <k->bdaddr, ltk->bdaddr_type, ltk->authenticated, +			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv), +			   __le64_to_cpu(ltk->rand), 16, ltk->val); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int long_term_keys_open(struct inode *inode, struct file *file) +{ +	return single_open(file, long_term_keys_show, inode->i_private); +} + +static const struct file_operations long_term_keys_fops = { +	.open		= long_term_keys_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int conn_min_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->le_conn_min_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_min_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->le_conn_min_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get, +			conn_min_interval_set, "%llu\n"); + +static int conn_max_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->le_conn_max_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_max_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->le_conn_max_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, +			conn_max_interval_set, "%llu\n"); + +static int adv_channel_map_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val < 0x01 || val > 0x07) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->le_adv_channel_map = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int adv_channel_map_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->le_adv_channel_map; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, +			adv_channel_map_set, "%llu\n"); + +static ssize_t lowpan_read(struct file *file, char __user *user_buf, +			   size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 
'Y' : 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer, +			    size_t count, loff_t *position) +{ +	struct hci_dev *hdev = fp->private_data; +	bool enable; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); + +	if (copy_from_user(buf, user_buffer, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; + +	if (strtobool(buf, &enable) < 0) +		return -EINVAL; + +	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) +		return -EALREADY; + +	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations lowpan_debugfs_fops = { +	.open		= simple_open, +	.read		= lowpan_read, +	.write		= lowpan_write, +	.llseek		= default_llseek, +}; + +static int le_auto_conn_show(struct seq_file *sf, void *ptr) +{ +	struct hci_dev *hdev = sf->private; +	struct hci_conn_params *p; + +	hci_dev_lock(hdev); + +	list_for_each_entry(p, &hdev->le_conn_params, list) { +		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type, +			   p->auto_connect); +	} + +	hci_dev_unlock(hdev); + +	return 0; +} + +static int le_auto_conn_open(struct inode *inode, struct file *file) +{ +	return single_open(file, le_auto_conn_show, inode->i_private); +} + +static ssize_t le_auto_conn_write(struct file *file, const char __user *data, +				  size_t count, loff_t *offset) +{ +	struct seq_file *sf = file->private_data; +	struct hci_dev *hdev = sf->private; +	u8 auto_connect = 0; +	bdaddr_t addr; +	u8 addr_type; +	char *buf; +	int err = 0; +	int n; + +	/* Don't allow partial write */ +	if (*offset != 0) +		return -EINVAL; + +	if (count < 3) +		return -EINVAL; + +	buf = memdup_user(data, count); +	if (IS_ERR(buf)) +		return PTR_ERR(buf); + +	if (memcmp(buf, "add", 3) == 0) { +		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu", +			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], +			   &addr.b[1], &addr.b[0], &addr_type, +			   &auto_connect); + +		if (n < 7) { +			err = -EINVAL; +			goto done; +		} + +		hci_dev_lock(hdev); +		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect, +					  hdev->le_conn_min_interval, +					  hdev->le_conn_max_interval); +		hci_dev_unlock(hdev); + +		if (err) +			goto done; +	} else if (memcmp(buf, "del", 3) == 0) { +		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", +			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], +			   &addr.b[1], &addr.b[0], &addr_type); + +		if (n < 7) { +			err = -EINVAL; +			goto done; +		} + +		hci_dev_lock(hdev); +		hci_conn_params_del(hdev, &addr, addr_type); +		hci_dev_unlock(hdev); +	} else if (memcmp(buf, "clr", 3) == 0) { +		hci_dev_lock(hdev); +		hci_conn_params_clear(hdev); +		hci_pend_le_conns_clear(hdev); +		hci_update_background_scan(hdev); +		hci_dev_unlock(hdev); +	} else { +		err = -EINVAL; +	} + +done: +	kfree(buf); + +	if (err) +		return err; +	else +		return count; +} + +static const struct file_operations le_auto_conn_fops = { +	.open		= le_auto_conn_open, +	.read		= seq_read, +	.write		= le_auto_conn_write, +	.llseek		= seq_lseek, +	.release	= single_release, +}; +  /* ---- HCI requests ---- */  static void hci_req_sync_complete(struct hci_dev *hdev, u8 result) @@ -307,11 +1327,23 @@ static void amp_init(struct hci_request *req)  	/* Read Local Version */  	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); +	/* Read Local Supported Commands */ +	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); + +	/* Read Local 
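le_auto_conn_write() above accepts small text commands ("add <bdaddr> <type> <auto_connect>", "del <bdaddr> <type>", "clr") and parses the address with sscanf(). A standalone sketch of the same parsing, reading from a string literal instead of a user buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "add 00:11:22:33:44:55 1 2";
	unsigned char b[6], addr_type = 0, auto_connect = 0;
	int n;

	if (!strncmp(buf, "add", 3)) {
		/* Most significant byte comes first in the text form,
		 * so it is stored into b[5], as in the kernel parser. */
		n = sscanf(buf + 4, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &b[5], &b[4], &b[3], &b[2], &b[1], &b[0],
			   &addr_type, &auto_connect);
		if (n < 7) {
			fprintf(stderr, "malformed add command\n");
			return 1;
		}
		printf("add %02x..%02x type %u auto_connect %u\n",
		       b[5], b[0], addr_type, auto_connect);
	}
	return 0;
}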
Supported Features */ +	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); +  	/* Read Local AMP Info */  	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);  	/* Read Data Blk size */  	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); + +	/* Read Flow Control Mode */ +	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); + +	/* Read Location Data */ +	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);  }  static void hci_init1_req(struct hci_request *req, unsigned long opt) @@ -341,6 +1373,8 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)  static void bredr_setup(struct hci_request *req)  { +	struct hci_dev *hdev = req->hdev; +  	__le16 param;  	__u8 flt_type; @@ -356,16 +1390,24 @@ static void bredr_setup(struct hci_request *req)  	/* Read Voice Setting */  	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); +	/* Read Number of Supported IAC */ +	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL); + +	/* Read Current IAC LAP */ +	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL); +  	/* Clear Event Filters */  	flt_type = HCI_FLT_CLEAR_ALL;  	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);  	/* Connection accept timeout ~20 secs */ -	param = __constant_cpu_to_le16(0x7d00); +	param = cpu_to_le16(0x7d00);  	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); -	/* Read page scan parameters */ -	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { +	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2, +	 * but it does not support page scan related HCI commands. +	 */ +	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {  		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);  		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);  	} @@ -381,14 +1423,17 @@ static void le_setup(struct hci_request *req)  	/* Read LE Local Supported Features */  	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); +	/* Read LE Supported States */ +	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); +  	/* Read LE Advertising Channel TX Power */  	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);  	/* Read LE White List Size */  	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); -	/* Read LE Supported States */ -	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); +	/* Clear LE White List */ +	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);  	/* LE-only controllers have LE implicitly enabled */  	if (!lmp_bredr_capable(hdev)) @@ -519,6 +1564,8 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)  	if (lmp_bredr_capable(hdev))  		bredr_setup(req); +	else +		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);  	if (lmp_le_capable(hdev))  		le_setup(req); @@ -532,6 +1579,14 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)  		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);  	if (lmp_ssp_capable(hdev)) { +		/* When SSP is available, then the host features page +		 * should also be available as well. However some +		 * controllers list the max_page as 0 as long as SSP +		 * has not been enabled. To achieve proper debugging +		 * output, force the minimum max_page to 1 at least. 
+		 */ +		hdev->max_page = 0x01; +  		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {  			u8 mode = 0x01;  			hci_req_add(req, HCI_OP_WRITE_SSP_MODE, @@ -607,6 +1662,38 @@ static void hci_set_le_support(struct hci_request *req)  			    &cp);  } +static void hci_set_event_mask_page_2(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + +	/* If Connectionless Slave Broadcast master role is supported +	 * enable all necessary events for it. +	 */ +	if (lmp_csb_master_capable(hdev)) { +		events[1] |= 0x40;	/* Triggered Clock Capture */ +		events[1] |= 0x80;	/* Synchronization Train Complete */ +		events[2] |= 0x10;	/* Slave Page Response Timeout */ +		events[2] |= 0x20;	/* CSB Channel Map Change */ +	} + +	/* If Connectionless Slave Broadcast slave role is supported +	 * enable all necessary events for it. +	 */ +	if (lmp_csb_slave_capable(hdev)) { +		events[2] |= 0x01;	/* Synchronization Train Received */ +		events[2] |= 0x02;	/* CSB Receive */ +		events[2] |= 0x04;	/* CSB Timeout */ +		events[2] |= 0x08;	/* Truncated Page Complete */ +	} + +	/* Enable Authenticated Payload Timeout Expired event if supported */ +	if (lmp_ping_capable(hdev)) +		events[2] |= 0x80; + +	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); +} +  static void hci_init3_req(struct hci_request *req, unsigned long opt)  {  	struct hci_dev *hdev = req->hdev; @@ -620,8 +1707,13 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)  	 * as supported send it. If not supported assume that the controller  	 * does not have actual support for stored link keys which makes this  	 * command redundant anyway. +	 * +	 * Some controllers indicate that they support handling deleting +	 * stored link keys, but they don't. The quirk lets a driver +	 * just disable this command.  	 */ -	if (hdev->commands[6] & 0x80) { +	if (hdev->commands[6] & 0x80 && +	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {  		struct hci_cp_delete_stored_link_key cp;  		bacpy(&cp.bdaddr, BDADDR_ANY); @@ -633,10 +1725,8 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)  	if (hdev->commands[5] & 0x10)  		hci_setup_link_policy(req); -	if (lmp_le_capable(hdev)) { +	if (lmp_le_capable(hdev))  		hci_set_le_support(req); -		hci_update_ad(req); -	}  	/* Read features beyond page 1 if available */  	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { @@ -648,6 +1738,28 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)  	}  } +static void hci_init4_req(struct hci_request *req, unsigned long opt) +{ +	struct hci_dev *hdev = req->hdev; + +	/* Set event mask page 2 if the HCI command for it is supported */ +	if (hdev->commands[22] & 0x04) +		hci_set_event_mask_page_2(req); + +	/* Check for Synchronization Train support */ +	if (lmp_sync_train_capable(hdev)) +		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); + +	/* Enable Secure Connections if supported and configured */ +	if ((lmp_sc_capable(hdev) || +	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) && +	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { +		u8 support = 0x01; +		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, +			    sizeof(support), &support); +	} +} +  static int __hci_init(struct hci_dev *hdev)  {  	int err; @@ -656,6 +1768,14 @@ static int __hci_init(struct hci_dev *hdev)  	if (err < 0)  		return err; +	/* The Device Under Test (DUT) mode is special and available for +	 * all controller types. 
So just create it early on. +	 */ +	if (test_bit(HCI_SETUP, &hdev->dev_flags)) { +		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, +				    &dut_mode_fops); +	} +  	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode  	 * BR/EDR/LE type controllers. AMP controllers only need the  	 * first stage init. @@ -667,7 +1787,110 @@ static int __hci_init(struct hci_dev *hdev)  	if (err < 0)  		return err; -	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); +	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); +	if (err < 0) +		return err; + +	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT); +	if (err < 0) +		return err; + +	/* Only create debugfs entries during the initial setup +	 * phase and not every time the controller gets powered on. +	 */ +	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) +		return 0; + +	debugfs_create_file("features", 0444, hdev->debugfs, hdev, +			    &features_fops); +	debugfs_create_u16("manufacturer", 0444, hdev->debugfs, +			   &hdev->manufacturer); +	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver); +	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); +	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, +			    &blacklist_fops); +	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); + +	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, +			    &conn_info_min_age_fops); +	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev, +			    &conn_info_max_age_fops); + +	if (lmp_bredr_capable(hdev)) { +		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, +				    hdev, &inquiry_cache_fops); +		debugfs_create_file("link_keys", 0400, hdev->debugfs, +				    hdev, &link_keys_fops); +		debugfs_create_file("dev_class", 0444, hdev->debugfs, +				    hdev, &dev_class_fops); +		debugfs_create_file("voice_setting", 0444, hdev->debugfs, +				    hdev, &voice_setting_fops); +	} + +	if (lmp_ssp_capable(hdev)) { +		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, +				    hdev, &auto_accept_delay_fops); +		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs, +				    hdev, &ssp_debug_mode_fops); +		debugfs_create_file("force_sc_support", 0644, hdev->debugfs, +				    hdev, &force_sc_support_fops); +		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, +				    hdev, &sc_only_mode_fops); +	} + +	if (lmp_sniff_capable(hdev)) { +		debugfs_create_file("idle_timeout", 0644, hdev->debugfs, +				    hdev, &idle_timeout_fops); +		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs, +				    hdev, &sniff_min_interval_fops); +		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs, +				    hdev, &sniff_max_interval_fops); +	} + +	if (lmp_le_capable(hdev)) { +		debugfs_create_file("identity", 0400, hdev->debugfs, +				    hdev, &identity_fops); +		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, +				    hdev, &rpa_timeout_fops); +		debugfs_create_file("random_address", 0444, hdev->debugfs, +				    hdev, &random_address_fops); +		debugfs_create_file("static_address", 0444, hdev->debugfs, +				    hdev, &static_address_fops); + +		/* For controllers with a public address, provide a debug +		 * option to force the usage of the configured static +		 * address. By default the public address is used. 
+		 */ +		if (bacmp(&hdev->bdaddr, BDADDR_ANY)) +			debugfs_create_file("force_static_address", 0644, +					    hdev->debugfs, hdev, +					    &force_static_address_fops); + +		debugfs_create_u8("white_list_size", 0444, hdev->debugfs, +				  &hdev->le_white_list_size); +		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, +				    &white_list_fops); +		debugfs_create_file("identity_resolving_keys", 0400, +				    hdev->debugfs, hdev, +				    &identity_resolving_keys_fops); +		debugfs_create_file("long_term_keys", 0400, hdev->debugfs, +				    hdev, &long_term_keys_fops); +		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, +				    hdev, &conn_min_interval_fops); +		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, +				    hdev, &conn_max_interval_fops); +		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, +				    hdev, &adv_channel_map_fops); +		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev, +				    &lowpan_debugfs_fops); +		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev, +				    &le_auto_conn_fops); +		debugfs_create_u16("discov_interleaved_timeout", 0644, +				   hdev->debugfs, +				   &hdev->discov_interleaved_timeout); +	} + +	return 0;  }  static void hci_scan_req(struct hci_request *req, unsigned long opt) @@ -757,6 +1980,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)  	switch (state) {  	case DISCOVERY_STOPPED: +		hci_update_background_scan(hdev); +  		if (hdev->discovery.state != DISCOVERY_STARTING)  			mgmt_discovering(hdev, 0);  		break; @@ -868,12 +2093,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,  	hci_remove_remote_oob_data(hdev, &data->bdaddr); -	if (ssp) -		*ssp = data->ssp_mode; +	*ssp = data->ssp_mode;  	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);  	if (ie) { -		if (ie->data.ssp_mode && ssp) +		if (ie->data.ssp_mode)  			*ssp = true;  		if (ie->name_state == NAME_NEEDED && @@ -984,6 +2208,21 @@ int hci_inquiry(void __user *arg)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		err = -EBUSY; +		goto done; +	} + +	if (hdev->dev_type != HCI_BREDR) { +		err = -EOPNOTSUPP; +		goto done; +	} + +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		err = -EOPNOTSUPP; +		goto done; +	} +  	hci_dev_lock(hdev);  	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||  	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { @@ -1043,100 +2282,10 @@ done:  	return err;  } -static u8 create_ad(struct hci_dev *hdev, u8 *ptr) +static int hci_dev_do_open(struct hci_dev *hdev)  { -	u8 ad_len = 0, flags = 0; -	size_t name_len; - -	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) -		flags |= LE_AD_GENERAL; - -	if (!lmp_bredr_capable(hdev)) -		flags |= LE_AD_NO_BREDR; - -	if (lmp_le_br_capable(hdev)) -		flags |= LE_AD_SIM_LE_BREDR_CTRL; - -	if (lmp_host_le_br_capable(hdev)) -		flags |= LE_AD_SIM_LE_BREDR_HOST; - -	if (flags) { -		BT_DBG("adv flags 0x%02x", flags); - -		ptr[0] = 2; -		ptr[1] = EIR_FLAGS; -		ptr[2] = flags; - -		ad_len += 3; -		ptr += 3; -	} - -	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) { -		ptr[0] = 2; -		ptr[1] = EIR_TX_POWER; -		ptr[2] = (u8) hdev->adv_tx_power; - -		ad_len += 3; -		ptr += 3; -	} - -	name_len = strlen(hdev->dev_name); -	if (name_len > 0) { -		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2; - -		if (name_len > max_len) { -			name_len = max_len; -			ptr[1] = EIR_NAME_SHORT; -		} else -			ptr[1] = EIR_NAME_COMPLETE; - -		ptr[0] = name_len + 1; - -		memcpy(ptr 
+ 2, hdev->dev_name, name_len); - -		ad_len += (name_len + 2); -		ptr += (name_len + 2); -	} - -	return ad_len; -} - -void hci_update_ad(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_le_set_adv_data cp; -	u8 len; - -	if (!lmp_le_capable(hdev)) -		return; - -	memset(&cp, 0, sizeof(cp)); - -	len = create_ad(hdev, cp.data); - -	if (hdev->adv_data_len == len && -	    memcmp(cp.data, hdev->adv_data, len) == 0) -		return; - -	memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); -	hdev->adv_data_len = len; - -	cp.length = len; - -	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); -} - -/* ---- HCI ioctl helpers ---- */ - -int hci_dev_open(__u16 dev) -{ -	struct hci_dev *hdev;  	int ret = 0; -	hdev = hci_dev_get(dev); -	if (!hdev) -		return -ENODEV; -  	BT_DBG("%s %p", hdev->name, hdev);  	hci_req_lock(hdev); @@ -1146,13 +2295,34 @@ int hci_dev_open(__u16 dev)  		goto done;  	} -	/* Check for rfkill but allow the HCI setup stage to proceed -	 * (which in itself doesn't cause any RF activity). -	 */ -	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) && -	    !test_bit(HCI_SETUP, &hdev->dev_flags)) { -		ret = -ERFKILL; -		goto done; +	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { +		/* Check for rfkill but allow the HCI setup stage to +		 * proceed (which in itself doesn't cause any RF activity). +		 */ +		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { +			ret = -ERFKILL; +			goto done; +		} + +		/* Check for valid public address or a configured static +		 * random adddress, but let the HCI setup proceed to +		 * be able to determine if there is a public address +		 * or not. +		 * +		 * In case of user channel usage, it is not important +		 * if a public address or static random address is +		 * available. +		 * +		 * This check is only valid for BR/EDR controllers +		 * since AMP controllers do not have an address. +		 */ +		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && +		    hdev->dev_type == HCI_BREDR && +		    !bacmp(&hdev->bdaddr, BDADDR_ANY) && +		    !bacmp(&hdev->static_addr, BDADDR_ANY)) { +			ret = -EADDRNOTAVAIL; +			goto done; +		}  	}  	if (test_bit(HCI_UP, &hdev->flags)) { @@ -1172,16 +2342,11 @@ int hci_dev_open(__u16 dev)  		ret = hdev->setup(hdev);  	if (!ret) { -		/* Treat all non BR/EDR controllers as raw devices if -		 * enable_hs is not set. -		 */ -		if (hdev->dev_type != HCI_BREDR && !enable_hs) -			set_bit(HCI_RAW, &hdev->flags); -  		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))  			set_bit(HCI_RAW, &hdev->flags); -		if (!test_bit(HCI_RAW, &hdev->flags)) +		if (!test_bit(HCI_RAW, &hdev->flags) && +		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))  			ret = __hci_init(hdev);  	} @@ -1189,10 +2354,12 @@ int hci_dev_open(__u16 dev)  	if (!ret) {  		hci_dev_hold(hdev); +		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);  		set_bit(HCI_UP, &hdev->flags);  		hci_notify(hdev, HCI_DEV_UP);  		if (!test_bit(HCI_SETUP, &hdev->dev_flags) && -		    mgmt_valid_hdev(hdev)) { +		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && +		    hdev->dev_type == HCI_BREDR) {  			hci_dev_lock(hdev);  			mgmt_powered(hdev, 1);  			hci_dev_unlock(hdev); @@ -1220,10 +2387,41 @@ int hci_dev_open(__u16 dev)  done:  	hci_req_unlock(hdev); -	hci_dev_put(hdev);  	return ret;  } +/* ---- HCI ioctl helpers ---- */ + +int hci_dev_open(__u16 dev) +{ +	struct hci_dev *hdev; +	int err; + +	hdev = hci_dev_get(dev); +	if (!hdev) +		return -ENODEV; + +	/* We need to ensure that no other power on/off work is pending +	 * before proceeding to call hci_dev_do_open. 
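hci_dev_do_open() above refuses to bring a BR/EDR controller up when it has neither a public address nor a configured static random address. A tiny standalone sketch of that check, comparing 6-byte addresses against all zeroes in the spirit of bacmp(..., BDADDR_ANY):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int check_addresses(const unsigned char bdaddr[6],
			   const unsigned char static_addr[6])
{
	static const unsigned char any[6];	/* 00:00:00:00:00:00 */

	if (!memcmp(bdaddr, any, 6) && !memcmp(static_addr, any, 6))
		return -EADDRNOTAVAIL;	/* no usable identity address */
	return 0;
}

int main(void)
{
	unsigned char none[6] = { 0 };
	unsigned char pub[6] = { 0x00, 0x1a, 0x7d, 0xda, 0x71, 0x13 };

	printf("%d\n", check_addresses(none, none));	/* -EADDRNOTAVAIL */
	printf("%d\n", check_addresses(pub, none));	/* 0 */
	return 0;
}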
This is +	 * particularly important if the setup procedure has not yet +	 * completed. +	 */ +	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) +		cancel_delayed_work(&hdev->power_off); + +	/* After this call it is guaranteed that the setup procedure +	 * has finished. This means that error conditions like RFKILL +	 * or no valid public or static random address apply. +	 */ +	flush_workqueue(hdev->req_workqueue); + +	err = hci_dev_do_open(hdev); + +	hci_dev_put(hdev); + +	return err; +} +  static int hci_dev_do_close(struct hci_dev *hdev)  {  	BT_DBG("%s %p", hdev->name, hdev); @@ -1247,6 +2445,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)  		cancel_delayed_work(&hdev->discov_off);  		hdev->discov_timeout = 0;  		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); +		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);  	}  	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) @@ -1254,9 +2453,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)  	cancel_delayed_work_sync(&hdev->le_scan_disable); +	if (test_bit(HCI_MGMT, &hdev->dev_flags)) +		cancel_delayed_work_sync(&hdev->rpa_expired); +  	hci_dev_lock(hdev);  	hci_inquiry_cache_flush(hdev);  	hci_conn_hash_flush(hdev); +	hci_pend_le_conns_clear(hdev);  	hci_dev_unlock(hdev);  	hci_notify(hdev, HCI_DEV_DOWN); @@ -1268,6 +2471,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)  	skb_queue_purge(&hdev->cmd_q);  	atomic_set(&hdev->cmd_cnt, 1);  	if (!test_bit(HCI_RAW, &hdev->flags) && +	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&  	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {  		set_bit(HCI_INIT, &hdev->flags);  		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); @@ -1300,18 +2504,20 @@ static int hci_dev_do_close(struct hci_dev *hdev)  	hdev->flags = 0;  	hdev->dev_flags &= ~HCI_PERSISTENT_MASK; -	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && -	    mgmt_valid_hdev(hdev)) { -		hci_dev_lock(hdev); -		mgmt_powered(hdev, 0); -		hci_dev_unlock(hdev); +	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { +		if (hdev->dev_type == HCI_BREDR) { +			hci_dev_lock(hdev); +			mgmt_powered(hdev, 0); +			hci_dev_unlock(hdev); +		}  	}  	/* Controller radio is available but is currently powered down */ -	hdev->amp_status = 0; +	hdev->amp_status = AMP_STATUS_POWERED_DOWN;  	memset(hdev->eir, 0, sizeof(hdev->eir));  	memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); +	bacpy(&hdev->random_addr, BDADDR_ANY);  	hci_req_unlock(hdev); @@ -1328,11 +2534,17 @@ int hci_dev_close(__u16 dev)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		err = -EBUSY; +		goto done; +	} +  	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))  		cancel_delayed_work(&hdev->power_off);  	err = hci_dev_do_close(hdev); +done:  	hci_dev_put(hdev);  	return err;  } @@ -1348,8 +2560,15 @@ int hci_dev_reset(__u16 dev)  	hci_req_lock(hdev); -	if (!test_bit(HCI_UP, &hdev->flags)) +	if (!test_bit(HCI_UP, &hdev->flags)) { +		ret = -ENETDOWN; +		goto done; +	} + +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		ret = -EBUSY;  		goto done; +	}  	/* Drop queues */  	skb_queue_purge(&hdev->rx_q); @@ -1384,10 +2603,15 @@ int hci_dev_reset_stat(__u16 dev)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		ret = -EBUSY; +		goto done; +	} +  	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); +done:  	hci_dev_put(hdev); -  	return ret;  } @@ -1404,6 +2628,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)  	if (!hdev)  		return 
-ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		err = -EBUSY; +		goto done; +	} + +	if (hdev->dev_type != HCI_BREDR) { +		err = -EOPNOTSUPP; +		goto done; +	} + +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		err = -EOPNOTSUPP; +		goto done; +	} +  	switch (cmd) {  	case HCISETAUTH:  		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, @@ -1462,6 +2701,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)  		break;  	} +done:  	hci_dev_put(hdev);  	return err;  } @@ -1534,7 +2774,7 @@ int hci_get_dev_info(void __user *arg)  	strcpy(di.name, hdev->name);  	di.bdaddr   = hdev->bdaddr; -	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4); +	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);  	di.flags    = hdev->flags;  	di.pkt_type = hdev->pkt_type;  	if (lmp_bredr_capable(hdev)) { @@ -1570,6 +2810,9 @@ static int hci_rfkill_set_block(void *data, bool blocked)  	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) +		return -EBUSY; +  	if (blocked) {  		set_bit(HCI_RFKILLED, &hdev->dev_flags);  		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) @@ -1592,13 +2835,20 @@ static void hci_power_on(struct work_struct *work)  	BT_DBG("%s", hdev->name); -	err = hci_dev_open(hdev->id); +	err = hci_dev_do_open(hdev);  	if (err < 0) {  		mgmt_set_powered_failed(hdev, err);  		return;  	} -	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { +	/* During the HCI setup phase, a few error conditions are +	 * ignored and they need to be checked now. If they are still +	 * valid, it is important to turn the device back off. +	 */ +	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) || +	    (hdev->dev_type == HCI_BREDR && +	     !bacmp(&hdev->bdaddr, BDADDR_ANY) && +	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {  		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);  		hci_dev_do_close(hdev);  	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { @@ -1623,22 +2873,15 @@ static void hci_power_off(struct work_struct *work)  static void hci_discov_off(struct work_struct *work)  {  	struct hci_dev *hdev; -	u8 scan = SCAN_PAGE;  	hdev = container_of(work, struct hci_dev, discov_off.work);  	BT_DBG("%s", hdev->name); -	hci_dev_lock(hdev); - -	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); - -	hdev->discov_timeout = 0; - -	hci_dev_unlock(hdev); +	mgmt_discoverable_timeout(hdev);  } -int hci_uuids_clear(struct hci_dev *hdev) +void hci_uuids_clear(struct hci_dev *hdev)  {  	struct bt_uuid *uuid, *tmp; @@ -1646,11 +2889,9 @@ int hci_uuids_clear(struct hci_dev *hdev)  		list_del(&uuid->list);  		kfree(uuid);  	} - -	return 0;  } -int hci_link_keys_clear(struct hci_dev *hdev) +void hci_link_keys_clear(struct hci_dev *hdev)  {  	struct list_head *p, *n; @@ -1662,11 +2903,9 @@ int hci_link_keys_clear(struct hci_dev *hdev)  		list_del(p);  		kfree(key);  	} - -	return 0;  } -int hci_smp_ltks_clear(struct hci_dev *hdev) +void hci_smp_ltks_clear(struct hci_dev *hdev)  {  	struct smp_ltk *k, *tmp; @@ -1674,8 +2913,16 @@ int hci_smp_ltks_clear(struct hci_dev *hdev)  		list_del(&k->list);  		kfree(k);  	} +} -	return 0; +void hci_smp_irks_clear(struct hci_dev *hdev) +{ +	struct smp_irk *k, *tmp; + +	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { +		list_del(&k->list); +		kfree(k); +	}  }  struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -1725,13 +2972,24 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,  	return false;  } -struct 
smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) +static bool ltk_type_master(u8 type) +{ +	if (type == HCI_SMP_STK || type == HCI_SMP_LTK) +		return true; + +	return false; +} + +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, +			     bool master)  {  	struct smp_ltk *k;  	list_for_each_entry(k, &hdev->long_term_keys, list) { -		if (k->ediv != ediv || -		    memcmp(rand, k->rand, sizeof(k->rand))) +		if (k->ediv != ediv || k->rand != rand) +			continue; + +		if (ltk_type_master(k->type) != master)  			continue;  		return k; @@ -1741,18 +2999,56 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])  }  struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, -				     u8 addr_type) +				     u8 addr_type, bool master)  {  	struct smp_ltk *k;  	list_for_each_entry(k, &hdev->long_term_keys, list)  		if (addr_type == k->bdaddr_type && -		    bacmp(bdaddr, &k->bdaddr) == 0) +		    bacmp(bdaddr, &k->bdaddr) == 0 && +		    ltk_type_master(k->type) == master)  			return k;  	return NULL;  } +struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) +{ +	struct smp_irk *irk; + +	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { +		if (!bacmp(&irk->rpa, rpa)) +			return irk; +	} + +	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { +		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) { +			bacpy(&irk->rpa, rpa); +			return irk; +		} +	} + +	return NULL; +} + +struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, +				     u8 addr_type) +{ +	struct smp_irk *irk; + +	/* Identity Address must be public or static random */ +	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) +		return NULL; + +	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { +		if (addr_type == irk->addr_type && +		    bacmp(bdaddr, &irk->bdaddr) == 0) +			return irk; +	} + +	return NULL; +} +  int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,  		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)  { @@ -1766,7 +3062,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,  		key = old_key;  	} else {  		old_key_type = conn ? 
conn->key_type : 0xff; -		key = kzalloc(sizeof(*key), GFP_ATOMIC); +		key = kzalloc(sizeof(*key), GFP_KERNEL);  		if (!key)  			return -ENOMEM;  		list_add(&key->list, &hdev->link_keys); @@ -1806,22 +3102,20 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,  	return 0;  } -int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, -		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16 -		ediv, u8 rand[8]) +struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 addr_type, u8 type, u8 authenticated, +			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)  {  	struct smp_ltk *key, *old_key; +	bool master = ltk_type_master(type); -	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK)) -		return 0; - -	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type); +	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);  	if (old_key)  		key = old_key;  	else { -		key = kzalloc(sizeof(*key), GFP_ATOMIC); +		key = kzalloc(sizeof(*key), GFP_KERNEL);  		if (!key) -			return -ENOMEM; +			return NULL;  		list_add(&key->list, &hdev->long_term_keys);  	} @@ -1830,17 +3124,34 @@ int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,  	memcpy(key->val, tk, sizeof(key->val));  	key->authenticated = authenticated;  	key->ediv = ediv; +	key->rand = rand;  	key->enc_size = enc_size;  	key->type = type; -	memcpy(key->rand, rand, sizeof(key->rand)); -	if (!new_key) -		return 0; +	return key; +} + +struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 addr_type, u8 val[16], bdaddr_t *rpa) +{ +	struct smp_irk *irk; -	if (type & HCI_SMP_LTK) -		mgmt_new_ltk(hdev, key, 1); +	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); +	if (!irk) { +		irk = kzalloc(sizeof(*irk), GFP_KERNEL); +		if (!irk) +			return NULL; -	return 0; +		bacpy(&irk->bdaddr, bdaddr); +		irk->addr_type = addr_type; + +		list_add(&irk->list, &hdev->identity_resolving_keys); +	} + +	memcpy(irk->val, val, 16); +	bacpy(&irk->rpa, rpa); + +	return irk;  }  int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -1859,21 +3170,38 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)  	return 0;  } -int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr) +int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)  {  	struct smp_ltk *k, *tmp; +	int removed = 0;  	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { -		if (bacmp(bdaddr, &k->bdaddr)) +		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)  			continue;  		BT_DBG("%s removing %pMR", hdev->name, bdaddr);  		list_del(&k->list);  		kfree(k); +		removed++;  	} -	return 0; +	return removed ? 
0 : -ENOENT; +} + +void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) +{ +	struct smp_irk *k, *tmp; + +	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { +		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) +			continue; + +		BT_DBG("%s removing %pMR", hdev->name, bdaddr); + +		list_del(&k->list); +		kfree(k); +	}  }  /* HCI command timer function */ @@ -1922,7 +3250,7 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)  	return 0;  } -int hci_remote_oob_data_clear(struct hci_dev *hdev) +void hci_remote_oob_data_clear(struct hci_dev *hdev)  {  	struct oob_data *data, *n; @@ -1930,19 +3258,43 @@ int hci_remote_oob_data_clear(struct hci_dev *hdev)  		list_del(&data->list);  		kfree(data);  	} +} + +int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 *hash, u8 *randomizer) +{ +	struct oob_data *data; + +	data = hci_find_remote_oob_data(hdev, bdaddr); +	if (!data) { +		data = kmalloc(sizeof(*data), GFP_KERNEL); +		if (!data) +			return -ENOMEM; + +		bacpy(&data->bdaddr, bdaddr); +		list_add(&data->list, &hdev->remote_oob_data); +	} + +	memcpy(data->hash192, hash, sizeof(data->hash192)); +	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192)); + +	memset(data->hash256, 0, sizeof(data->hash256)); +	memset(data->randomizer256, 0, sizeof(data->randomizer256)); + +	BT_DBG("%s for %pMR", hdev->name, bdaddr);  	return 0;  } -int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, -			    u8 *randomizer) +int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, +				u8 *hash192, u8 *randomizer192, +				u8 *hash256, u8 *randomizer256)  {  	struct oob_data *data;  	data = hci_find_remote_oob_data(hdev, bdaddr); -  	if (!data) { -		data = kmalloc(sizeof(*data), GFP_ATOMIC); +		data = kmalloc(sizeof(*data), GFP_KERNEL);  		if (!data)  			return -ENOMEM; @@ -1950,49 +3302,50 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,  		list_add(&data->list, &hdev->remote_oob_data);  	} -	memcpy(data->hash, hash, sizeof(data->hash)); -	memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); +	memcpy(data->hash192, hash192, sizeof(data->hash192)); +	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192)); + +	memcpy(data->hash256, hash256, sizeof(data->hash256)); +	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));  	BT_DBG("%s for %pMR", hdev->name, bdaddr);  	return 0;  } -struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, +					 bdaddr_t *bdaddr, u8 type)  {  	struct bdaddr_list *b; -	list_for_each_entry(b, &hdev->blacklist, list) -		if (bacmp(bdaddr, &b->bdaddr) == 0) +	list_for_each_entry(b, &hdev->blacklist, list) { +		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)  			return b; +	}  	return NULL;  } -int hci_blacklist_clear(struct hci_dev *hdev) +static void hci_blacklist_clear(struct hci_dev *hdev)  {  	struct list_head *p, *n;  	list_for_each_safe(p, n, &hdev->blacklist) { -		struct bdaddr_list *b; - -		b = list_entry(p, struct bdaddr_list, list); +		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);  		list_del(p);  		kfree(b);  	} - -	return 0;  }  int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  {  	struct bdaddr_list *entry; -	if (bacmp(bdaddr, BDADDR_ANY) == 0) +	if (!bacmp(bdaddr, BDADDR_ANY))  		return -EBADF; -	if 
(hci_blacklist_lookup(hdev, bdaddr)) +	if (hci_blacklist_lookup(hdev, bdaddr, type))  		return -EEXIST;  	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); @@ -2000,6 +3353,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  		return -ENOMEM;  	bacpy(&entry->bdaddr, bdaddr); +	entry->bdaddr_type = type;  	list_add(&entry->list, &hdev->blacklist); @@ -2010,10 +3364,12 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  {  	struct bdaddr_list *entry; -	if (bacmp(bdaddr, BDADDR_ANY) == 0) -		return hci_blacklist_clear(hdev); +	if (!bacmp(bdaddr, BDADDR_ANY)) { +		hci_blacklist_clear(hdev); +		return 0; +	} -	entry = hci_blacklist_lookup(hdev, bdaddr); +	entry = hci_blacklist_lookup(hdev, bdaddr, type);  	if (!entry)  		return -ENOENT; @@ -2023,6 +3379,262 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  	return mgmt_device_unblocked(hdev, bdaddr, type);  } +struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev, +					  bdaddr_t *bdaddr, u8 type) +{ +	struct bdaddr_list *b; + +	list_for_each_entry(b, &hdev->le_white_list, list) { +		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) +			return b; +	} + +	return NULL; +} + +void hci_white_list_clear(struct hci_dev *hdev) +{ +	struct list_head *p, *n; + +	list_for_each_safe(p, n, &hdev->le_white_list) { +		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); + +		list_del(p); +		kfree(b); +	} +} + +int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ +	struct bdaddr_list *entry; + +	if (!bacmp(bdaddr, BDADDR_ANY)) +		return -EBADF; + +	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); +	if (!entry) +		return -ENOMEM; + +	bacpy(&entry->bdaddr, bdaddr); +	entry->bdaddr_type = type; + +	list_add(&entry->list, &hdev->le_white_list); + +	return 0; +} + +int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ +	struct bdaddr_list *entry; + +	if (!bacmp(bdaddr, BDADDR_ANY)) +		return -EBADF; + +	entry = hci_white_list_lookup(hdev, bdaddr, type); +	if (!entry) +		return -ENOENT; + +	list_del(&entry->list); +	kfree(entry); + +	return 0; +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, +					       bdaddr_t *addr, u8 addr_type) +{ +	struct hci_conn_params *params; + +	list_for_each_entry(params, &hdev->le_conn_params, list) { +		if (bacmp(¶ms->addr, addr) == 0 && +		    params->addr_type == addr_type) { +			return params; +		} +	} + +	return NULL; +} + +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ +	struct hci_conn *conn; + +	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); +	if (!conn) +		return false; + +	if (conn->dst_type != type) +		return false; + +	if (conn->state != BT_CONNECTED) +		return false; + +	return true; +} + +static bool is_identity_address(bdaddr_t *addr, u8 addr_type) +{ +	if (addr_type == ADDR_LE_DEV_PUBLIC) +		return true; + +	/* Check for Random Static address type */ +	if ((addr->b[5] & 0xc0) == 0xc0) +		return true; + +	return false; +} + +/* This function requires the caller holds hdev->lock */ +int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, +			u8 auto_connect, u16 conn_min_interval, +			u16 conn_max_interval) +{ +	struct hci_conn_params *params; + +	if (!is_identity_address(addr, addr_type)) +		return -EINVAL; + +	params = hci_conn_params_lookup(hdev, addr, addr_type); +	if (params) +		goto update; + +	params = 
kzalloc(sizeof(*params), GFP_KERNEL); +	if (!params) { +		BT_ERR("Out of memory"); +		return -ENOMEM; +	} + +	bacpy(¶ms->addr, addr); +	params->addr_type = addr_type; + +	list_add(¶ms->list, &hdev->le_conn_params); + +update: +	params->conn_min_interval = conn_min_interval; +	params->conn_max_interval = conn_max_interval; +	params->auto_connect = auto_connect; + +	switch (auto_connect) { +	case HCI_AUTO_CONN_DISABLED: +	case HCI_AUTO_CONN_LINK_LOSS: +		hci_pend_le_conn_del(hdev, addr, addr_type); +		break; +	case HCI_AUTO_CONN_ALWAYS: +		if (!is_connected(hdev, addr, addr_type)) +			hci_pend_le_conn_add(hdev, addr, addr_type); +		break; +	} + +	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x " +	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect, +	       conn_min_interval, conn_max_interval); + +	return 0; +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ +	struct hci_conn_params *params; + +	params = hci_conn_params_lookup(hdev, addr, addr_type); +	if (!params) +		return; + +	hci_pend_le_conn_del(hdev, addr, addr_type); + +	list_del(¶ms->list); +	kfree(params); + +	BT_DBG("addr %pMR (type %u)", addr, addr_type); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_clear(struct hci_dev *hdev) +{ +	struct hci_conn_params *params, *tmp; + +	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { +		list_del(¶ms->list); +		kfree(params); +	} + +	BT_DBG("All LE connection parameters were removed"); +} + +/* This function requires the caller holds hdev->lock */ +struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev, +					    bdaddr_t *addr, u8 addr_type) +{ +	struct bdaddr_list *entry; + +	list_for_each_entry(entry, &hdev->pend_le_conns, list) { +		if (bacmp(&entry->bdaddr, addr) == 0 && +		    entry->bdaddr_type == addr_type) +			return entry; +	} + +	return NULL; +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ +	struct bdaddr_list *entry; + +	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); +	if (entry) +		goto done; + +	entry = kzalloc(sizeof(*entry), GFP_KERNEL); +	if (!entry) { +		BT_ERR("Out of memory"); +		return; +	} + +	bacpy(&entry->bdaddr, addr); +	entry->bdaddr_type = addr_type; + +	list_add(&entry->list, &hdev->pend_le_conns); + +	BT_DBG("addr %pMR (type %u)", addr, addr_type); + +done: +	hci_update_background_scan(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ +	struct bdaddr_list *entry; + +	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); +	if (!entry) +		goto done; + +	list_del(&entry->list); +	kfree(entry); + +	BT_DBG("addr %pMR (type %u)", addr, addr_type); + +done: +	hci_update_background_scan(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conns_clear(struct hci_dev *hdev) +{ +	struct bdaddr_list *entry, *tmp; + +	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) { +		list_del(&entry->list); +		kfree(entry); +	} + +	BT_DBG("All LE pending connections cleared"); +} +  static void inquiry_complete(struct hci_dev *hdev, u8 status)  {  	if (status) { @@ -2082,7 +3694,6 @@ static void le_scan_disable_work(struct work_struct *work)  {  	struct hci_dev *hdev = container_of(work, struct hci_dev,  					    le_scan_disable.work); -	
struct hci_cp_le_set_scan_enable cp;
 	struct hci_request req;
 	int err;
@@ -2090,15 +3701,128 @@ static void le_scan_disable_work(struct work_struct *work)
 	hci_req_init(&req, hdev);
-	memset(&cp, 0, sizeof(cp));
-	cp.enable = LE_SCAN_DISABLE;
-	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+	hci_req_add_le_scan_disable(&req);
 	err = hci_req_run(&req, le_scan_disable_work_complete);
 	if (err)
 		BT_ERR("Disable LE scanning request failed: err %d", err);
 }
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+{
+	struct hci_dev *hdev = req->hdev;
+
+	/* If we're advertising or initiating an LE connection we can't
+	 * go ahead and change the random address at this time. This is
+	 * because the eventual initiator address used for the
+	 * subsequently created connection will be undefined (some
+	 * controllers use the new address and others the one we had
+	 * when the operation started).
+	 *
+	 * In this kind of scenario skip the update and let the random
+	 * address be updated at the next cycle.
+	 */
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
+	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+		BT_DBG("Deferring random address update");
+		return;
+	}
+
+	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+			      u8 *own_addr_type)
+{
+	struct hci_dev *hdev = req->hdev;
+	int err;
+
+	/* If privacy is enabled use a resolvable private address. If
+	 * current RPA has expired or there is something else than
+	 * the current RPA in use, then generate a new one.
+	 */
+	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+		int to;
+
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+
+		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+		    !bacmp(&hdev->random_addr, &hdev->rpa))
+			return 0;
+
+		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
+		if (err < 0) {
+			BT_ERR("%s failed to generate new RPA", hdev->name);
+			return err;
+		}
+
+		set_random_addr(req, &hdev->rpa);
+
+		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
+		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
+
+		return 0;
+	}
+
+	/* In case of required privacy without resolvable private address,
+	 * use an unresolvable private address. This is useful for active
+	 * scanning and non-connectable advertising.
+	 */
+	if (require_privacy) {
+		bdaddr_t urpa;
+
+		get_random_bytes(&urpa, 6);
+		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */
+
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+		set_random_addr(req, &urpa);
+		return 0;
+	}
+
+	/* If forcing static address is in use or there is no public
+	 * address use the static address as random address (but skip
+	 * the HCI command if the current random address is already the
+	 * static one).
+	 */
+	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+		if (bacmp(&hdev->static_addr, &hdev->random_addr))
+			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+				    &hdev->static_addr);
+		return 0;
+	}
+
+	/* Neither privacy nor static address is being used so use a
+	 * public address.
+	 */
+	*own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+	return 0;
+}
+
+/* Copy the Identity Address of the controller.
+ *
+ * If the controller has a public BD_ADDR, then by default use that one.
+ * If this is a LE only controller without a public address, default to
+ * the static random address.
+ * For debugging purposes it is possible to force controllers with a
+ * public address to use the static random address instead.
+ */
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+			       u8 *bdaddr_type)
+{
+	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+		bacpy(bdaddr, &hdev->static_addr);
+		*bdaddr_type = ADDR_LE_DEV_RANDOM;
+	} else {
+		bacpy(bdaddr, &hdev->bdaddr);
+		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
+	}
+}
+
 /* Alloc HCI device */
 struct hci_dev *hci_alloc_dev(void)
 {
@@ -2111,13 +3835,25 @@ struct hci_dev *hci_alloc_dev(void)
 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
 	hdev->esco_type = (ESCO_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
-	hdev->io_capability = 0x03; /* No Input No Output */
+	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
+	hdev->io_capability = 0x03;	/* No Input No Output */
 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
+	hdev->le_adv_channel_map = 0x07;
+	hdev->le_scan_interval = 0x0060;
+	hdev->le_scan_window = 0x0030;
+	hdev->le_conn_min_interval = 0x0028;
+	hdev->le_conn_max_interval = 0x0038;
+
+	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
+	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
+	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
+	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
+
 	mutex_init(&hdev->lock);
 	mutex_init(&hdev->req_lock);
@@ -2126,7 +3862,11 @@ struct hci_dev *hci_alloc_dev(void)
 	INIT_LIST_HEAD(&hdev->uuids);
 	INIT_LIST_HEAD(&hdev->link_keys);
 	INIT_LIST_HEAD(&hdev->long_term_keys);
+	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
+	INIT_LIST_HEAD(&hdev->le_white_list);
+	INIT_LIST_HEAD(&hdev->le_conn_params);
+	INIT_LIST_HEAD(&hdev->pend_le_conns);
 	INIT_LIST_HEAD(&hdev->conn_hash.list);
 	INIT_WORK(&hdev->rx_work, hci_rx_work);
@@ -2206,9 +3946,23 @@ int hci_register_dev(struct hci_dev *hdev)
 		goto err;
 	}
-	error = hci_add_sysfs(hdev);
-	if (error < 0)
+	if (!IS_ERR_OR_NULL(bt_debugfs))
+		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+
+	dev_set_name(&hdev->dev, "%s", hdev->name);
+
+	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
+					       CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hdev->tfm_aes)) {
+		BT_ERR("Unable to create crypto context");
+		error = PTR_ERR(hdev->tfm_aes);
+		hdev->tfm_aes = NULL;
 		goto err_wqueue;
+	}
+
+	error = device_add(&hdev->dev);
+	if (error < 0)
+		goto err_tfm;
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
@@ -2224,9 +3978,14 @@ int hci_register_dev(struct hci_dev *hdev)
 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
 	set_bit(HCI_SETUP, &hdev->dev_flags);
+	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
-	if (hdev->dev_type != HCI_AMP)
-		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+	if (hdev->dev_type == HCI_BREDR) {
+		/* Assume BR/EDR support until proven otherwise (such as
+		 * through reading supported features during init.)
+		 */
+		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+	}
 	write_lock(&hci_dev_list_lock);
 	list_add(&hdev->list, &hci_dev_list);
@@ -2239,6 +3998,8 @@ int hci_register_dev(struct hci_dev *hdev)
 	return id;
+err_tfm:
+	crypto_free_blkcipher(hdev->tfm_aes);
 err_wqueue:
 	destroy_workqueue(hdev->workqueue);
 	destroy_workqueue(hdev->req_workqueue);
@@ -2289,7 +4050,12 @@ void hci_unregister_dev(struct hci_dev *hdev)
 		rfkill_destroy(hdev->rfkill);
 	}
-	hci_del_sysfs(hdev);
+	if (hdev->tfm_aes)
+		crypto_free_blkcipher(hdev->tfm_aes);
+
+	device_del(&hdev->dev);
+
+	debugfs_remove_recursive(hdev->debugfs);
 	destroy_workqueue(hdev->workqueue);
 	destroy_workqueue(hdev->req_workqueue);
@@ -2299,7 +4065,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
 	hci_smp_ltks_clear(hdev);
+	hci_smp_irks_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
+	hci_white_list_clear(hdev);
+	hci_conn_params_clear(hdev);
+	hci_pend_le_conns_clear(hdev);
 	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
@@ -2325,9 +4095,8 @@ int hci_resume_dev(struct hci_dev *hdev)
 EXPORT_SYMBOL(hci_resume_dev);
 /* Receive frame from HCI drivers */
-int hci_recv_frame(struct sk_buff *skb)
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
 		      && !test_bit(HCI_INIT, &hdev->flags))) {
 		kfree_skb(skb);
@@ -2386,7 +4155,6 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
 		scb->expect = hlen;
 		scb->pkt_type = type;
-		skb->dev = (void *) hdev;
 		hdev->reassembly[index] = skb;
 	}
@@ -2446,7 +4214,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
 			/* Complete frame */
 			bt_cb(skb)->pkt_type = type;
-			hci_recv_frame(skb);
+			hci_recv_frame(hdev, skb);
 			hdev->reassembly[index] = NULL;
 			return remain;
@@ -2537,15 +4305,8 @@ int hci_unregister_cb(struct hci_cb *cb)
 }
 EXPORT_SYMBOL(hci_unregister_cb);
-static int hci_send_frame(struct sk_buff *skb)
+static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-
-	if (!hdev) {
-		kfree_skb(skb);
-		return -ENODEV;
-	}
-
 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 	/* Time stamp */
@@ -2562,7 +4323,8 @@ static int hci_send_frame(struct sk_buff *skb)
 	/* Get rid of skb owner, prior to sending to the driver.
*/  	skb_orphan(skb); -	return hdev->send(skb); +	if (hdev->send(hdev, skb) < 0) +		BT_ERR("%s sending frame failed", hdev->name);  }  void hci_req_init(struct hci_request *req, struct hci_dev *hdev) @@ -2625,7 +4387,6 @@ static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,  	BT_DBG("skb len %d", skb->len);  	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; -	skb->dev = (void *) hdev;  	return skb;  } @@ -2769,7 +4530,6 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,  		do {  			skb = list; list = list->next; -			skb->dev = (void *) hdev;  			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;  			hci_add_acl_hdr(skb, conn->handle, flags); @@ -2788,8 +4548,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)  	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); -	skb->dev = (void *) hdev; -  	hci_queue_acl(chan, &chan->data_q, skb, flags);  	queue_work(hdev->workqueue, &hdev->tx_work); @@ -2810,7 +4568,6 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)  	skb_reset_transport_header(skb);  	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); -	skb->dev = (void *) hdev;  	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;  	skb_queue_tail(&conn->data_q, skb); @@ -3075,7 +4832,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)  			hci_conn_enter_active_mode(chan->conn,  						   bt_cb(skb)->force_active); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			hdev->acl_last_tx = jiffies;  			hdev->acl_cnt--; @@ -3127,7 +4884,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)  			hci_conn_enter_active_mode(chan->conn,  						   bt_cb(skb)->force_active); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			hdev->acl_last_tx = jiffies;  			hdev->block_cnt -= blocks; @@ -3180,7 +4937,7 @@ static void hci_sched_sco(struct hci_dev *hdev)  	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {  			BT_DBG("skb %p len %d", skb, skb->len); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			conn->sent++;  			if (conn->sent == ~0) @@ -3204,7 +4961,7 @@ static void hci_sched_esco(struct hci_dev *hdev)  						     "e))) {  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {  			BT_DBG("skb %p len %d", skb, skb->len); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			conn->sent++;  			if (conn->sent == ~0) @@ -3246,7 +5003,7 @@ static void hci_sched_le(struct hci_dev *hdev)  			skb = skb_dequeue(&chan->data_q); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			hdev->le_last_tx = jiffies;  			cnt--; @@ -3272,19 +5029,17 @@ static void hci_tx_work(struct work_struct *work)  	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,  	       hdev->sco_cnt, hdev->le_cnt); -	/* Schedule queues and send stuff to HCI driver */ - -	hci_sched_acl(hdev); - -	hci_sched_sco(hdev); - -	hci_sched_esco(hdev); - -	hci_sched_le(hdev); +	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		/* Schedule queues and send stuff to HCI driver */ +		hci_sched_acl(hdev); +		hci_sched_sco(hdev); +		hci_sched_esco(hdev); +		hci_sched_le(hdev); +	}  	/* Send next queued raw (unknown type) packet */  	while ((skb = skb_dequeue(&hdev->raw_q))) -		hci_send_frame(skb); +		hci_send_frame(hdev, skb);  }  /* ----- HCI RX task (incoming data processing) ----- */ @@ -3471,7 +5226,8 @@ static void hci_rx_work(struct work_struct *work)  			hci_send_to_sock(hdev, skb);  		} -		if (test_bit(HCI_RAW, &hdev->flags)) { +		if 
(test_bit(HCI_RAW, &hdev->flags) || +		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {  			kfree_skb(skb);  			continue;  		} @@ -3526,10 +5282,10 @@ static void hci_cmd_work(struct work_struct *work)  		kfree_skb(hdev->sent_cmd); -		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC); +		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);  		if (hdev->sent_cmd) {  			atomic_dec(&hdev->cmd_cnt); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			if (test_bit(HCI_RESET, &hdev->flags))  				del_timer(&hdev->cmd_timer);  			else @@ -3542,14 +5298,103 @@ static void hci_cmd_work(struct work_struct *work)  	}  } -u8 bdaddr_to_le(u8 bdaddr_type) +void hci_req_add_le_scan_disable(struct hci_request *req)  { -	switch (bdaddr_type) { -	case BDADDR_LE_PUBLIC: -		return ADDR_LE_DEV_PUBLIC; +	struct hci_cp_le_set_scan_enable cp; -	default: -		/* Fallback to LE Random address type */ -		return ADDR_LE_DEV_RANDOM; +	memset(&cp, 0, sizeof(cp)); +	cp.enable = LE_SCAN_DISABLE; +	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); +} + +void hci_req_add_le_passive_scan(struct hci_request *req) +{ +	struct hci_cp_le_set_scan_param param_cp; +	struct hci_cp_le_set_scan_enable enable_cp; +	struct hci_dev *hdev = req->hdev; +	u8 own_addr_type; + +	/* Set require_privacy to true to avoid identification from +	 * unknown peer devices. Since this is passive scanning, no +	 * SCAN_REQ using the local identity should be sent. Mandating +	 * privacy is just an extra precaution. +	 */ +	if (hci_update_random_address(req, true, &own_addr_type)) +		return; + +	memset(¶m_cp, 0, sizeof(param_cp)); +	param_cp.type = LE_SCAN_PASSIVE; +	param_cp.interval = cpu_to_le16(hdev->le_scan_interval); +	param_cp.window = cpu_to_le16(hdev->le_scan_window); +	param_cp.own_address_type = own_addr_type; +	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), +		    ¶m_cp); + +	memset(&enable_cp, 0, sizeof(enable_cp)); +	enable_cp.enable = LE_SCAN_ENABLE; +	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; +	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), +		    &enable_cp); +} + +static void update_background_scan_complete(struct hci_dev *hdev, u8 status) +{ +	if (status) +		BT_DBG("HCI request failed to update background scanning: " +		       "status 0x%2.2x", status); +} + +/* This function controls the background scanning based on hdev->pend_le_conns + * list. If there are pending LE connection we start the background scanning, + * otherwise we stop it. + * + * This function requires the caller holds hdev->lock. + */ +void hci_update_background_scan(struct hci_dev *hdev) +{ +	struct hci_request req; +	struct hci_conn *conn; +	int err; + +	hci_req_init(&req, hdev); + +	if (list_empty(&hdev->pend_le_conns)) { +		/* If there is no pending LE connections, we should stop +		 * the background scanning. +		 */ + +		/* If controller is not scanning we are done. */ +		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) +			return; + +		hci_req_add_le_scan_disable(&req); + +		BT_DBG("%s stopping background scanning", hdev->name); +	} else { +		/* If there is at least one pending LE connection, we should +		 * keep the background scan running. +		 */ + +		/* If controller is connecting, we should not start scanning +		 * since some controllers are not able to scan and connect at +		 * the same time. +		 */ +		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); +		if (conn) +			return; + +		/* If controller is currently scanning, we stop it to ensure we +		 * don't miss any advertising (due to duplicates filter). 
+		 */ +		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) +			hci_req_add_le_scan_disable(&req); + +		hci_req_add_le_passive_scan(&req); + +		BT_DBG("%s starting background scanning", hdev->name);  	} + +	err = hci_req_run(&req, update_background_scan_complete); +	if (err) +		BT_ERR("Failed to run HCI request: err %d", err);  } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8db3e89fae3..640c54ec1bd 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -29,8 +29,9 @@  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h>  #include <net/bluetooth/mgmt.h> -#include <net/bluetooth/a2mp.h> -#include <net/bluetooth/amp.h> + +#include "a2mp.h" +#include "amp.h"  /* Handle HCI Event packets */ @@ -44,9 +45,13 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)  		return;  	clear_bit(HCI_INQUIRY, &hdev->flags); -	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ +	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */  	wake_up_bit(&hdev->flags, HCI_INQUIRY); +	hci_dev_lock(hdev); +	hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +	hci_dev_unlock(hdev); +  	hci_conn_check_pending(hdev);  } @@ -194,6 +199,13 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)  	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));  	hdev->adv_data_len = 0; + +	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); +	hdev->scan_rsp_data_len = 0; + +	hdev->le_scan_type = LE_SCAN_PASSIVE; + +	hdev->ssp_debug_mode = 0;  }  static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) @@ -297,6 +309,11 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)  		goto done;  	} +	/* We need to ensure that we set this back on if someone changed +	 * the scan mode through a raw HCI socket. 
+	 */ +	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); +  	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);  	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); @@ -304,11 +321,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)  		set_bit(HCI_ISCAN, &hdev->flags);  		if (!old_iscan)  			mgmt_discoverable(hdev, 1); -		if (hdev->discov_timeout > 0) { -			int to = msecs_to_jiffies(hdev->discov_timeout * 1000); -			queue_delayed_work(hdev->workqueue, &hdev->discov_off, -					   to); -		}  	} else if (old_iscan)  		mgmt_discoverable(hdev, 0); @@ -412,6 +424,21 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev,  		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);  } +static void hci_cc_read_num_supported_iac(struct hci_dev *hdev, +					  struct sk_buff *skb) +{ +	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data; + +	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + +	if (rp->status) +		return; + +	hdev->num_iac = rp->num_iac; + +	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac); +} +  static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)  {  	__u8 status = *((__u8 *) skb->data); @@ -440,6 +467,34 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)  	}  } +static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb) +{ +	u8 status = *((u8 *) skb->data); +	struct hci_cp_write_sc_support *sent; + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); +	if (!sent) +		return; + +	if (!status) { +		if (sent->support) +			hdev->features[1][0] |= LMP_HOST_SC; +		else +			hdev->features[1][0] &= ~LMP_HOST_SC; +	} + +	if (test_bit(HCI_MGMT, &hdev->dev_flags)) +		mgmt_sc_enable_complete(hdev, sent->support, status); +	else if (!status) { +		if (sent->support) +			set_bit(HCI_SC_ENABLED, &hdev->dev_flags); +		else +			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); +	} +} +  static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)  {  	struct hci_rp_read_local_version *rp = (void *) skb->data; @@ -449,14 +504,13 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)  	if (rp->status)  		return; -	hdev->hci_ver = rp->hci_ver; -	hdev->hci_rev = __le16_to_cpu(rp->hci_rev); -	hdev->lmp_ver = rp->lmp_ver; -	hdev->manufacturer = __le16_to_cpu(rp->manufacturer); -	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); - -	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name, -	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); +	if (test_bit(HCI_SETUP, &hdev->dev_flags)) { +		hdev->hci_ver = rp->hci_ver; +		hdev->hci_rev = __le16_to_cpu(rp->hci_rev); +		hdev->lmp_ver = rp->lmp_ver; +		hdev->manufacturer = __le16_to_cpu(rp->manufacturer); +		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); +	}  }  static void hci_cc_read_local_commands(struct hci_dev *hdev, @@ -466,7 +520,10 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,  	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); -	if (!rp->status) +	if (rp->status) +		return; + +	if (test_bit(HCI_SETUP, &hdev->dev_flags))  		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));  } @@ -518,12 +575,6 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,  	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)  		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); - -	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, -	       hdev->features[0][0], hdev->features[0][1], -	       
hdev->features[0][2], hdev->features[0][3], -	       hdev->features[0][4], hdev->features[0][5], -	       hdev->features[0][6], hdev->features[0][7]);  }  static void hci_cc_read_local_ext_features(struct hci_dev *hdev, @@ -536,7 +587,8 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,  	if (rp->status)  		return; -	hdev->max_page = rp->max_page; +	if (hdev->max_page < rp->max_page) +		hdev->max_page = rp->max_page;  	if (rp->page < HCI_MAX_PAGES)  		memcpy(hdev->features[rp->page], rp->features, 8); @@ -886,16 +938,50 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,  	hci_dev_unlock(hdev);  } -static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, -					     struct sk_buff *skb) +static void hci_cc_read_local_oob_data(struct hci_dev *hdev, +				       struct sk_buff *skb)  {  	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;  	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);  	hci_dev_lock(hdev); -	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash, -						rp->randomizer, rp->status); +	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer, +					  NULL, NULL, rp->status); +	hci_dev_unlock(hdev); +} + +static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, +					   struct sk_buff *skb) +{ +	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; + +	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + +	hci_dev_lock(hdev); +	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192, +					  rp->hash256, rp->randomizer256, +					  rp->status); +	hci_dev_unlock(hdev); +} + + +static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) +{ +	__u8 status = *((__u8 *) skb->data); +	bdaddr_t *sent; + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); +	if (!sent) +		return; + +	hci_dev_lock(hdev); + +	if (!status) +		bacpy(&hdev->random_addr, sent); +  	hci_dev_unlock(hdev);  } @@ -909,26 +995,75 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)  	if (!sent)  		return; +	if (status) +		return; +  	hci_dev_lock(hdev); -	if (!status) { -		if (*sent) -			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); -		else -			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); +	/* If we're doing connection initation as peripheral. Set a +	 * timeout in case something goes wrong. 
+	 */ +	if (*sent) { +		struct hci_conn *conn; + +		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); +		if (conn) +			queue_delayed_work(hdev->workqueue, +					   &conn->le_conn_timeout, +					   HCI_LE_CONN_TIMEOUT);  	} -	if (!test_bit(HCI_INIT, &hdev->flags)) { -		struct hci_request req; +	mgmt_advertising(hdev, *sent); -		hci_req_init(&req, hdev); -		hci_update_ad(&req); -		hci_req_run(&req, NULL); -	} +	hci_dev_unlock(hdev); +} + +static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) +{ +	struct hci_cp_le_set_scan_param *cp; +	__u8 status = *((__u8 *) skb->data); + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); +	if (!cp) +		return; + +	hci_dev_lock(hdev); + +	if (!status) +		hdev->le_scan_type = cp->type;  	hci_dev_unlock(hdev);  } +static bool has_pending_adv_report(struct hci_dev *hdev) +{ +	struct discovery_state *d = &hdev->discovery; + +	return bacmp(&d->last_adv_addr, BDADDR_ANY); +} + +static void clear_pending_adv_report(struct hci_dev *hdev) +{ +	struct discovery_state *d = &hdev->discovery; + +	bacpy(&d->last_adv_addr, BDADDR_ANY); +	d->last_adv_data_len = 0; +} + +static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, +				     u8 bdaddr_type, s8 rssi, u8 *data, u8 len) +{ +	struct discovery_state *d = &hdev->discovery; + +	bacpy(&d->last_adv_addr, bdaddr); +	d->last_adv_addr_type = bdaddr_type; +	d->last_adv_rssi = rssi; +	memcpy(d->last_adv_data, data, len); +	d->last_adv_data_len = len; +} +  static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,  				      struct sk_buff *skb)  { @@ -947,10 +1082,38 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,  	switch (cp->enable) {  	case LE_SCAN_ENABLE:  		set_bit(HCI_LE_SCAN, &hdev->dev_flags); +		if (hdev->le_scan_type == LE_SCAN_ACTIVE) +			clear_pending_adv_report(hdev);  		break;  	case LE_SCAN_DISABLE: +		/* We do this here instead of when setting DISCOVERY_STOPPED +		 * since the latter would potentially require waiting for +		 * inquiry to stop too. +		 */ +		if (has_pending_adv_report(hdev)) { +			struct discovery_state *d = &hdev->discovery; + +			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, +					  d->last_adv_addr_type, NULL, +					  d->last_adv_rssi, 0, 1, +					  d->last_adv_data, +					  d->last_adv_data_len, NULL, 0); +		} + +		/* Cancel this timer so that we don't try to disable scanning +		 * when it's already disabled. +		 */ +		cancel_delayed_work(&hdev->le_scan_disable); +  		clear_bit(HCI_LE_SCAN, &hdev->dev_flags); +		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we +		 * interrupted scanning due to a connect request. Mark +		 * therefore discovery as stopped. 
+		 */ +		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED, +				       &hdev->dev_flags)) +			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);  		break;  	default: @@ -970,6 +1133,49 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,  		hdev->le_white_list_size = rp->size;  } +static void hci_cc_le_clear_white_list(struct hci_dev *hdev, +				       struct sk_buff *skb) +{ +	__u8 status = *((__u8 *) skb->data); + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	if (!status) +		hci_white_list_clear(hdev); +} + +static void hci_cc_le_add_to_white_list(struct hci_dev *hdev, +					struct sk_buff *skb) +{ +	struct hci_cp_le_add_to_white_list *sent; +	__u8 status = *((__u8 *) skb->data); + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST); +	if (!sent) +		return; + +	if (!status) +		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type); +} + +static void hci_cc_le_del_from_white_list(struct hci_dev *hdev, +					  struct sk_buff *skb) +{ +	struct hci_cp_le_del_from_white_list *sent; +	__u8 status = *((__u8 *) skb->data); + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST); +	if (!sent) +		return; + +	if (!status) +		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type); +} +  static void hci_cc_le_read_supported_states(struct hci_dev *hdev,  					    struct sk_buff *skb)  { @@ -994,20 +1200,39 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,  		return;  	if (!status) { -		if (sent->le) +		if (sent->le) {  			hdev->features[1][0] |= LMP_HOST_LE; -		else +			set_bit(HCI_LE_ENABLED, &hdev->dev_flags); +		} else {  			hdev->features[1][0] &= ~LMP_HOST_LE; +			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags); +			clear_bit(HCI_ADVERTISING, &hdev->dev_flags); +		}  		if (sent->simul)  			hdev->features[1][0] |= LMP_HOST_LE_BREDR;  		else  			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;  	} +} + +static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) +{ +	struct hci_cp_le_set_adv_param *cp; +	u8 status = *((u8 *) skb->data); + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	if (status) +		return; -	if (test_bit(HCI_MGMT, &hdev->dev_flags) && -	    !test_bit(HCI_INIT, &hdev->flags)) -		mgmt_le_enable_complete(hdev, sent->le, status); +	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); +	if (!cp) +		return; + +	hci_dev_lock(hdev); +	hdev->adv_addr_type = cp->own_address_type; +	hci_dev_unlock(hdev);  }  static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, @@ -1024,6 +1249,59 @@ static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,  	amp_write_rem_assoc_continue(hdev, rp->phy_handle);  } +static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) +{ +	struct hci_rp_read_rssi *rp = (void *) skb->data; +	struct hci_conn *conn; + +	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + +	if (rp->status) +		return; + +	hci_dev_lock(hdev); + +	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); +	if (conn) +		conn->rssi = rp->rssi; + +	hci_dev_unlock(hdev); +} + +static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb) +{ +	struct hci_cp_read_tx_power *sent; +	struct hci_rp_read_tx_power *rp = (void *) skb->data; +	struct hci_conn *conn; + +	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + +	if (rp->status) +		return; + +	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); +	if (!sent) +		return; + +	hci_dev_lock(hdev); + +	conn 
= hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); +	if (!conn) +		goto unlock; + +	switch (sent->type) { +	case 0x00: +		conn->tx_power = rp->tx_power; +		break; +	case 0x01: +		conn->max_tx_power = rp->tx_power; +		break; +	} + +unlock: +	hci_dev_unlock(hdev); +} +  static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)  {  	BT_DBG("%s status 0x%2.2x", hdev->name, status); @@ -1175,9 +1453,13 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,  		return 0;  	/* Only request authentication for SSP connections or non-SSP -	 * devices with sec_level HIGH or if MITM protection is requested */ +	 * devices with sec_level MEDIUM or HIGH or if MITM protection +	 * is requested. +	 */  	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && -	    conn->pending_sec_level != BT_SECURITY_HIGH) +	    conn->pending_sec_level != BT_SECURITY_FIPS && +	    conn->pending_sec_level != BT_SECURITY_HIGH && +	    conn->pending_sec_level != BT_SECURITY_MEDIUM)  		return 0;  	return 1; @@ -1291,9 +1573,11 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)  		goto unlock;  	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { -		struct hci_cp_auth_requested cp; -		cp.handle = __cpu_to_le16(conn->handle); -		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); +		struct hci_cp_auth_requested auth_cp; + +		auth_cp.handle = __cpu_to_le16(conn->handle); +		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, +			     sizeof(auth_cp), &auth_cp);  	}  unlock: @@ -1465,33 +1749,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)  	hci_dev_unlock(hdev);  } -static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) -{ -	struct hci_conn *conn; - -	BT_DBG("%s status 0x%2.2x", hdev->name, status); - -	if (status) { -		hci_dev_lock(hdev); - -		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); -		if (!conn) { -			hci_dev_unlock(hdev); -			return; -		} - -		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn); - -		conn->state = BT_CLOSED; -		mgmt_connect_failed(hdev, &conn->dst, conn->type, -				    conn->dst_type, status); -		hci_proto_connect_cfm(conn, status); -		hci_conn_del(conn); - -		hci_dev_unlock(hdev); -	} -} -  static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)  {  	struct hci_cp_create_phy_link *cp; @@ -1533,6 +1790,87 @@ static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)  	amp_write_remote_assoc(hdev, cp->phy_handle);  } +static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) +{ +	struct hci_cp_le_create_conn *cp; +	struct hci_conn *conn; + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	/* All connection failure handling is taken care of by the +	 * hci_le_conn_failed function which is triggered by the HCI +	 * request completion callbacks used for connecting. +	 */ +	if (status) +		return; + +	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); +	if (!cp) +		return; + +	hci_dev_lock(hdev); + +	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); +	if (!conn) +		goto unlock; + +	/* Store the initiator and responder address information which +	 * is needed for SMP. These values will not change during the +	 * lifetime of the connection. 
+	 */ +	conn->init_addr_type = cp->own_address_type; +	if (cp->own_address_type == ADDR_LE_DEV_RANDOM) +		bacpy(&conn->init_addr, &hdev->random_addr); +	else +		bacpy(&conn->init_addr, &hdev->bdaddr); + +	conn->resp_addr_type = cp->peer_addr_type; +	bacpy(&conn->resp_addr, &cp->peer_addr); + +	/* We don't want the connection attempt to stick around +	 * indefinitely since LE doesn't have a page timeout concept +	 * like BR/EDR. Set a timer for any connection that doesn't use +	 * the white list for connecting. +	 */ +	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) +		queue_delayed_work(conn->hdev->workqueue, +				   &conn->le_conn_timeout, +				   HCI_LE_CONN_TIMEOUT); + +unlock: +	hci_dev_unlock(hdev); +} + +static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) +{ +	struct hci_cp_le_start_enc *cp; +	struct hci_conn *conn; + +	BT_DBG("%s status 0x%2.2x", hdev->name, status); + +	if (!status) +		return; + +	hci_dev_lock(hdev); + +	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); +	if (!cp) +		goto unlock; + +	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); +	if (!conn) +		goto unlock; + +	if (conn->state != BT_CONNECTED) +		goto unlock; + +	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); +	hci_conn_drop(conn); + +unlock: +	hci_dev_unlock(hdev); +} +  static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  {  	__u8 status = *((__u8 *) skb->data); @@ -1546,7 +1884,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))  		return; -	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ +	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */  	wake_up_bit(&hdev->flags, HCI_INQUIRY);  	if (!test_bit(HCI_MGMT, &hdev->dev_flags)) @@ -1605,7 +1943,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)  		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);  		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  				  info->dev_class, 0, !name_known, ssp, NULL, -				  0); +				  0, NULL, 0);  	}  	hci_dev_unlock(hdev); @@ -1674,7 +2012,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  	} else {  		conn->state = BT_CLOSED;  		if (conn->type == ACL_LINK) -			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, +			mgmt_connect_failed(hdev, &conn->dst, conn->type,  					    conn->dst_type, ev->status);  	} @@ -1706,7 +2044,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)  				      &flags);  	if ((mask & HCI_LM_ACCEPT) && -	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) { +	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {  		/* Connection accepted */  		struct inquiry_entry *ie;  		struct hci_conn *conn; @@ -1753,9 +2091,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)  			bacpy(&cp.bdaddr, &ev->bdaddr);  			cp.pkt_type = cpu_to_le16(conn->pkt_type); -			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40); -			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40); -			cp.max_latency    = __constant_cpu_to_le16(0xffff); +			cp.tx_bandwidth   = cpu_to_le32(0x00001f40); +			cp.rx_bandwidth   = cpu_to_le32(0x00001f40); +			cp.max_latency    = cpu_to_le16(0xffff);  			cp.content_format = cpu_to_le16(hdev->voice_setting);  			cp.retrans_effort = 0xff; @@ -1794,7 +2132,11 @@ static u8 hci_to_mgmt_reason(u8 err)  static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff 
*skb)  {  	struct hci_ev_disconn_complete *ev = (void *) skb->data; +	u8 reason = hci_to_mgmt_reason(ev->reason); +	struct hci_conn_params *params;  	struct hci_conn *conn; +	bool mgmt_connected; +	u8 type;  	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); @@ -1804,28 +2146,55 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  	if (!conn)  		goto unlock; -	if (ev->status == 0) -		conn->state = BT_CLOSED; +	if (ev->status) { +		mgmt_disconnect_failed(hdev, &conn->dst, conn->type, +				       conn->dst_type, ev->status); +		goto unlock; +	} -	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && -	    (conn->type == ACL_LINK || conn->type == LE_LINK)) { -		if (ev->status) { -			mgmt_disconnect_failed(hdev, &conn->dst, conn->type, -					       conn->dst_type, ev->status); -		} else { -			u8 reason = hci_to_mgmt_reason(ev->reason); +	conn->state = BT_CLOSED; + +	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); +	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, +				reason, mgmt_connected); + +	if (conn->type == ACL_LINK && conn->flush_key) +		hci_remove_link_key(hdev, &conn->dst); + +	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); +	if (params) { +		switch (params->auto_connect) { +		case HCI_AUTO_CONN_LINK_LOSS: +			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) +				break; +			/* Fall through */ + +		case HCI_AUTO_CONN_ALWAYS: +			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type); +			break; -			mgmt_device_disconnected(hdev, &conn->dst, conn->type, -						 conn->dst_type, reason); +		default: +			break;  		}  	} -	if (ev->status == 0) { -		if (conn->type == ACL_LINK && conn->flush_key) -			hci_remove_link_key(hdev, &conn->dst); -		hci_proto_disconn_cfm(conn, ev->reason); -		hci_conn_del(conn); -	} +	type = conn->type; + +	hci_proto_disconn_cfm(conn, ev->reason); +	hci_conn_del(conn); + +	/* Re-enable advertising if necessary, since it might +	 * have been disabled by the connection. From the +	 * HCI_LE_Set_Advertise_Enable command description in +	 * the core specification (v4.0): +	 * "The Controller shall continue advertising until the Host +	 * issues an LE_Set_Advertise_Enable command with +	 * Advertising_Enable set to 0x00 (Advertising is disabled) +	 * or until a connection is created or until the Advertising +	 * is timed out due to Directed Advertising." 
+	 */ +	if (type == LE_LINK) +		mgmt_reenable_advertising(hdev);  unlock:  	hci_dev_unlock(hdev); @@ -1946,34 +2315,57 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)  	hci_dev_lock(hdev);  	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); -	if (conn) { -		if (!ev->status) { -			if (ev->encrypt) { -				/* Encryption implies authentication */ -				conn->link_mode |= HCI_LM_AUTH; -				conn->link_mode |= HCI_LM_ENCRYPT; -				conn->sec_level = conn->pending_sec_level; -			} else -				conn->link_mode &= ~HCI_LM_ENCRYPT; +	if (!conn) +		goto unlock; + +	if (!ev->status) { +		if (ev->encrypt) { +			/* Encryption implies authentication */ +			conn->link_mode |= HCI_LM_AUTH; +			conn->link_mode |= HCI_LM_ENCRYPT; +			conn->sec_level = conn->pending_sec_level; + +			/* P-256 authentication key implies FIPS */ +			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) +				conn->link_mode |= HCI_LM_FIPS; + +			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || +			    conn->type == LE_LINK) +				set_bit(HCI_CONN_AES_CCM, &conn->flags); +		} else { +			conn->link_mode &= ~HCI_LM_ENCRYPT; +			clear_bit(HCI_CONN_AES_CCM, &conn->flags);  		} +	} -		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); +	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + +	if (ev->status && conn->state == BT_CONNECTED) { +		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); +		hci_conn_drop(conn); +		goto unlock; +	} -		if (ev->status && conn->state == BT_CONNECTED) { -			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); +	if (conn->state == BT_CONFIG) { +		if (!ev->status) +			conn->state = BT_CONNECTED; + +		/* In Secure Connections Only mode, do not allow any +		 * connections that are not encrypted with AES-CCM +		 * using a P-256 authenticated combination key. 
+		 */ +		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && +		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || +		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { +			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);  			hci_conn_drop(conn);  			goto unlock;  		} -		if (conn->state == BT_CONFIG) { -			if (!ev->status) -				conn->state = BT_CONNECTED; - -			hci_proto_connect_cfm(conn, ev->status); -			hci_conn_drop(conn); -		} else -			hci_encrypt_cfm(conn, ev->status, ev->encrypt); -	} +		hci_proto_connect_cfm(conn, ev->status); +		hci_conn_drop(conn); +	} else +		hci_encrypt_cfm(conn, ev->status, ev->encrypt);  unlock:  	hci_dev_unlock(hdev); @@ -2139,10 +2531,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  		hci_cc_write_voice_setting(hdev, skb);  		break; +	case HCI_OP_READ_NUM_SUPPORTED_IAC: +		hci_cc_read_num_supported_iac(hdev, skb); +		break; +  	case HCI_OP_WRITE_SSP_MODE:  		hci_cc_write_ssp_mode(hdev, skb);  		break; +	case HCI_OP_WRITE_SC_SUPPORT: +		hci_cc_write_sc_support(hdev, skb); +		break; +  	case HCI_OP_READ_LOCAL_VERSION:  		hci_cc_read_local_version(hdev, skb);  		break; @@ -2212,7 +2612,11 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  		break;  	case HCI_OP_READ_LOCAL_OOB_DATA: -		hci_cc_read_local_oob_data_reply(hdev, skb); +		hci_cc_read_local_oob_data(hdev, skb); +		break; + +	case HCI_OP_READ_LOCAL_OOB_EXT_DATA: +		hci_cc_read_local_oob_ext_data(hdev, skb);  		break;  	case HCI_OP_LE_READ_BUFFER_SIZE: @@ -2243,10 +2647,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  		hci_cc_user_passkey_neg_reply(hdev, skb);  		break; +	case HCI_OP_LE_SET_RANDOM_ADDR: +		hci_cc_le_set_random_addr(hdev, skb); +		break; +  	case HCI_OP_LE_SET_ADV_ENABLE:  		hci_cc_le_set_adv_enable(hdev, skb);  		break; +	case HCI_OP_LE_SET_SCAN_PARAM: +		hci_cc_le_set_scan_param(hdev, skb); +		break; +  	case HCI_OP_LE_SET_SCAN_ENABLE:  		hci_cc_le_set_scan_enable(hdev, skb);  		break; @@ -2255,6 +2667,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  		hci_cc_le_read_white_list_size(hdev, skb);  		break; +	case HCI_OP_LE_CLEAR_WHITE_LIST: +		hci_cc_le_clear_white_list(hdev, skb); +		break; + +	case HCI_OP_LE_ADD_TO_WHITE_LIST: +		hci_cc_le_add_to_white_list(hdev, skb); +		break; + +	case HCI_OP_LE_DEL_FROM_WHITE_LIST: +		hci_cc_le_del_from_white_list(hdev, skb); +		break; +  	case HCI_OP_LE_READ_SUPPORTED_STATES:  		hci_cc_le_read_supported_states(hdev, skb);  		break; @@ -2263,10 +2687,22 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  		hci_cc_write_le_host_supported(hdev, skb);  		break; +	case HCI_OP_LE_SET_ADV_PARAM: +		hci_cc_set_adv_param(hdev, skb); +		break; +  	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:  		hci_cc_write_remote_amp_assoc(hdev, skb);  		break; +	case HCI_OP_READ_RSSI: +		hci_cc_read_rssi(hdev, skb); +		break; + +	case HCI_OP_READ_TX_POWER: +		hci_cc_read_tx_power(hdev, skb); +		break; +  	default:  		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);  		break; @@ -2342,10 +2778,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)  		hci_cs_disconnect(hdev, ev->status);  		break; -	case HCI_OP_LE_CREATE_CONN: -		hci_cs_le_create_conn(hdev, ev->status); -		break; -  	case HCI_OP_CREATE_PHY_LINK:  		hci_cs_create_phylink(hdev, ev->status);  		break; @@ -2354,6 +2786,14 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)  		hci_cs_accept_phylink(hdev, 
ev->status);  		break; +	case HCI_OP_LE_CREATE_CONN: +		hci_cs_le_create_conn(hdev, ev->status); +		break; + +	case HCI_OP_LE_START_ENC: +		hci_cs_le_start_enc(hdev, ev->status); +		break; +  	default:  		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);  		break; @@ -2548,7 +2988,6 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)  	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));  	if (conn) {  		conn->mode = ev->mode; -		conn->interval = __le16_to_cpu(ev->interval);  		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,  					&conn->flags)) { @@ -2634,14 +3073,16 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)  	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);  	if (conn) { -		if (key->type == HCI_LK_UNAUTH_COMBINATION && +		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || +		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&  		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {  			BT_DBG("%s ignoring unauthenticated key", hdev->name);  			goto not_found;  		}  		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && -		    conn->pending_sec_level == BT_SECURITY_HIGH) { +		    (conn->pending_sec_level == BT_SECURITY_HIGH || +		     conn->pending_sec_level == BT_SECURITY_FIPS)) {  			BT_DBG("%s ignoring key unauthenticated for high security",  			       hdev->name);  			goto not_found; @@ -2786,7 +3227,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,  							      false, &ssp);  			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  					  info->dev_class, info->rssi, -					  !name_known, ssp, NULL, 0); +					  !name_known, ssp, NULL, 0, NULL, 0);  		}  	} else {  		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); @@ -2804,7 +3245,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,  							      false, &ssp);  			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  					  info->dev_class, info->rssi, -					  !name_known, ssp, NULL, 0); +					  !name_known, ssp, NULL, 0, NULL, 0);  		}  	} @@ -2848,6 +3289,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,  			 * features do not indicate SSP support */  			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);  		} + +		if (ev->features[0] & LMP_HOST_SC) +			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);  	}  	if (conn->state != BT_CONFIG) @@ -2909,6 +3353,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,  	case 0x1c:	/* SCO interval rejected */  	case 0x1a:	/* Unsupported Remote Feature */  	case 0x1f:	/* Unspecified error */ +	case 0x20:	/* Unsupported LMP Parameter value */  		if (conn->out) {  			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |  					(hdev->esco_type & EDR_ESCO_MASK); @@ -2930,6 +3375,23 @@ unlock:  	hci_dev_unlock(hdev);  } +static inline size_t eir_get_length(u8 *eir, size_t eir_len) +{ +	size_t parsed = 0; + +	while (parsed < eir_len) { +		u8 field_len = eir[0]; + +		if (field_len == 0) +			return parsed; + +		parsed += field_len + 1; +		eir += field_len + 1; +	} + +	return eir_len; +} +  static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,  					    struct sk_buff *skb)  { @@ -2972,7 +3434,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,  		eir_len = eir_get_length(info->data, sizeof(info->data));  		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  				  info->dev_class, info->rssi, !name_known, -				  ssp, info->data, eir_len); +				  ssp, info->data, eir_len, NULL, 0);  	} 
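The eir_get_length() helper introduced above walks the length-prefixed fields of an Extended Inquiry Response and reports how many bytes are significant. The following is a minimal standalone sketch of that same walk; the eir_used_length() name, the sample buffer and main() are invented for illustration and are not part of the patch.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Each EIR field is laid out as [length][type][length - 1 data bytes];
 * a zero length byte ends the significant part of the buffer early.
 */
static size_t eir_used_length(const uint8_t *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		uint8_t field_len = eir[parsed];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
	}

	return eir_len;
}

int main(void)
{
	/* one field: type 0x09 (Complete Local Name) carrying "demo" */
	const uint8_t eir[] = { 0x05, 0x09, 'd', 'e', 'm', 'o', 0x00, 0x00 };

	printf("significant EIR bytes: %zu\n",
	       eir_used_length(eir, sizeof(eir)));
	return 0;
}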
 	hci_dev_unlock(hdev); @@ -2993,6 +3455,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,  	if (!conn)  		goto unlock; +	/* For BR/EDR the necessary steps are taken through the +	 * auth_complete event. +	 */ +	if (conn->type != LE_LINK) +		goto unlock; +  	if (!ev->status)  		conn->sec_level = conn->pending_sec_level; @@ -3024,24 +3492,20 @@ unlock:  static u8 hci_get_auth_req(struct hci_conn *conn)  { -	/* If remote requests dedicated bonding follow that lead */ -	if (conn->remote_auth == HCI_AT_DEDICATED_BONDING || -	    conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) { -		/* If both remote and local IO capabilities allow MITM -		 * protection then require it, otherwise don't */ -		if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT || -		    conn->io_capability == HCI_IO_NO_INPUT_OUTPUT) -			return HCI_AT_DEDICATED_BONDING; -		else -			return HCI_AT_DEDICATED_BONDING_MITM; -	} -  	/* If remote requests no-bonding follow that lead */  	if (conn->remote_auth == HCI_AT_NO_BONDING ||  	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)  		return conn->remote_auth | (conn->auth_type & 0x01); -	return conn->auth_type; +	/* If both remote and local have enough IO capabilities, require +	 * MITM protection +	 */ +	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && +	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) +		return conn->remote_auth | 0x01; + +	/* No MITM protection possible so ignore remote requirement */ +	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);  }  static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -3071,8 +3535,25 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)  		 * to DisplayYesNo as it is not supported by BT spec. */  		cp.capability = (conn->io_capability == 0x04) ?  				HCI_IO_DISPLAY_YESNO : conn->io_capability; -		conn->auth_type = hci_get_auth_req(conn); -		cp.authentication = conn->auth_type; + +		/* If we are initiators, there is no remote information yet */ +		if (conn->remote_auth == 0xff) { +			cp.authentication = conn->auth_type; + +			/* Request MITM protection if our IO caps allow it +			 * except for the no-bonding case. +			 * conn->auth_type is not updated here since +			 * that might cause the user confirmation to be +			 * rejected in case the remote doesn't have the +			 * IO capabilities for MITM. +			 */ +			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && +			    cp.authentication != HCI_AT_NO_BONDING) +				cp.authentication |= 0x01; +		} else { +			conn->auth_type = hci_get_auth_req(conn); +			cp.authentication = conn->auth_type; +		}  		if (hci_find_remote_oob_data(hdev, &conn->dst) &&  		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) @@ -3140,12 +3621,9 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,  	rem_mitm = (conn->remote_auth & 0x01);  	/* If we require MITM but the remote device can't provide that -	 * (it has NoInputNoOutput) then reject the confirmation -	 * request. The only exception is when we're dedicated bonding -	 * initiators (connect_cfm_cb set) since then we always have the MITM -	 * bit set. 
*/ -	if (!conn->connect_cfm_cb && loc_mitm && -	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { +	 * (it has NoInputNoOutput) then reject the confirmation request +	 */ +	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {  		BT_DBG("Rejecting request: remote device can't provide MITM");  		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,  			     sizeof(ev->bdaddr), &ev->bdaddr); @@ -3158,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,  		/* If we're not the initiators request authorization to  		 * proceed from user space (mgmt_user_confirm with -		 * confirm_hint set to 1). */ -		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { +		 * confirm_hint set to 1). The exception is if neither +		 * side had MITM in which case we do auto-accept. +		 */ +		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && +		    (loc_mitm || rem_mitm)) {  			BT_DBG("Confirming auto-accept as acceptor");  			confirm_hint = 1;  			goto confirm; @@ -3170,7 +3651,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,  		if (hdev->auto_accept_delay > 0) {  			int delay = msecs_to_jiffies(hdev->auto_accept_delay); -			mod_timer(&conn->auto_accept_timer, jiffies + delay); +			queue_delayed_work(conn->hdev->workqueue, +					   &conn->auto_accept_work, delay);  			goto unlock;  		} @@ -3180,8 +3662,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,  	}  confirm: -	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey, -				  confirm_hint); +	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, +				  le32_to_cpu(ev->passkey), confirm_hint);  unlock:  	hci_dev_unlock(hdev); @@ -3323,20 +3805,36 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,  	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);  	if (data) { -		struct hci_cp_remote_oob_data_reply cp; +		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { +			struct hci_cp_remote_oob_ext_data_reply cp; -		bacpy(&cp.bdaddr, &ev->bdaddr); -		memcpy(cp.hash, data->hash, sizeof(cp.hash)); -		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); +			bacpy(&cp.bdaddr, &ev->bdaddr); +			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); +			memcpy(cp.randomizer192, data->randomizer192, +			       sizeof(cp.randomizer192)); +			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); +			memcpy(cp.randomizer256, data->randomizer256, +			       sizeof(cp.randomizer256)); + +			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, +				     sizeof(cp), &cp); +		} else { +			struct hci_cp_remote_oob_data_reply cp; -		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), -			     &cp); +			bacpy(&cp.bdaddr, &ev->bdaddr); +			memcpy(cp.hash, data->hash192, sizeof(cp.hash)); +			memcpy(cp.randomizer, data->randomizer192, +			       sizeof(cp.randomizer)); + +			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, +				     sizeof(cp), &cp); +		}  	} else {  		struct hci_cp_remote_oob_data_neg_reply cp;  		bacpy(&cp.bdaddr, &ev->bdaddr); -		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), -			     &cp); +		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, +			     sizeof(cp), &cp);  	}  unlock: @@ -3470,6 +3968,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  {  	struct hci_ev_le_conn_complete *ev = (void *) skb->data;  	struct hci_conn *conn; +	struct smp_irk *irk;  	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); @@ -3489,48 +3988,219 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, 
struct sk_buff *skb)  			conn->out = true;  			conn->link_mode |= HCI_LM_MASTER;  		} + +		/* If we didn't have a hci_conn object previously +		 * but we're in master role this must be something +		 * initiated using a white list. Since white list based +		 * connections are not "first class citizens" we don't +		 * have full tracking of them. Therefore, we go ahead +		 * with a "best effort" approach of determining the +		 * initiator address based on the HCI_PRIVACY flag. +		 */ +		if (conn->out) { +			conn->resp_addr_type = ev->bdaddr_type; +			bacpy(&conn->resp_addr, &ev->bdaddr); +			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { +				conn->init_addr_type = ADDR_LE_DEV_RANDOM; +				bacpy(&conn->init_addr, &hdev->rpa); +			} else { +				hci_copy_identity_address(hdev, +							  &conn->init_addr, +							  &conn->init_addr_type); +			} +		} +	} else { +		cancel_delayed_work(&conn->le_conn_timeout); +	} + +	if (!conn->out) { +		/* Set the responder (our side) address type based on +		 * the advertising address type. +		 */ +		conn->resp_addr_type = hdev->adv_addr_type; +		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) +			bacpy(&conn->resp_addr, &hdev->random_addr); +		else +			bacpy(&conn->resp_addr, &hdev->bdaddr); + +		conn->init_addr_type = ev->bdaddr_type; +		bacpy(&conn->init_addr, &ev->bdaddr); +	} + +	/* Lookup the identity address from the stored connection +	 * address and address type. +	 * +	 * When establishing connections to an identity address, the +	 * connection procedure will store the resolvable random +	 * address first. Now if it can be converted back into the +	 * identity address, start using the identity address from +	 * now on. +	 */ +	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); +	if (irk) { +		bacpy(&conn->dst, &irk->bdaddr); +		conn->dst_type = irk->addr_type;  	}  	if (ev->status) { -		mgmt_connect_failed(hdev, &conn->dst, conn->type, -				    conn->dst_type, ev->status); -		hci_proto_connect_cfm(conn, ev->status); -		conn->state = BT_CLOSED; -		hci_conn_del(conn); +		hci_le_conn_failed(conn, ev->status);  		goto unlock;  	}  	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) -		mgmt_device_connected(hdev, &ev->bdaddr, conn->type, +		mgmt_device_connected(hdev, &conn->dst, conn->type,  				      conn->dst_type, 0, NULL, 0, NULL);  	conn->sec_level = BT_SECURITY_LOW;  	conn->handle = __le16_to_cpu(ev->handle);  	conn->state = BT_CONNECTED; +	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) +		set_bit(HCI_CONN_6LOWPAN, &conn->flags); +  	hci_conn_add_sysfs(conn);  	hci_proto_connect_cfm(conn, ev->status); +	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type); +  unlock:  	hci_dev_unlock(hdev);  } +/* This function requires the caller holds hdev->lock */ +static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, +				  u8 addr_type) +{ +	struct hci_conn *conn; +	struct smp_irk *irk; + +	/* If this is a resolvable address, we should resolve it and then +	 * update address and address type variables. +	 */ +	irk = hci_get_irk(hdev, addr, addr_type); +	if (irk) { +		addr = &irk->bdaddr; +		addr_type = irk->addr_type; +	} + +	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type)) +		return; + +	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, +			      HCI_AT_NO_BONDING); +	if (!IS_ERR(conn)) +		return; + +	switch (PTR_ERR(conn)) { +	case -EBUSY: +		/* If hci_connect() returns -EBUSY it means there is already +		 * an LE connection attempt going on. 
Since controllers don't +		 * support more than one connection attempt at the time, we +		 * don't consider this an error case. +		 */ +		break; +	default: +		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); +	} +} + +static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, +			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len) +{ +	struct discovery_state *d = &hdev->discovery; +	bool match; + +	/* Passive scanning shouldn't trigger any device found events */ +	if (hdev->le_scan_type == LE_SCAN_PASSIVE) { +		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND) +			check_pending_le_conn(hdev, bdaddr, bdaddr_type); +		return; +	} + +	/* If there's nothing pending either store the data from this +	 * event or send an immediate device found event if the data +	 * should not be stored for later. +	 */ +	if (!has_pending_adv_report(hdev)) { +		/* If the report will trigger a SCAN_REQ store it for +		 * later merging. +		 */ +		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { +			store_pending_adv_report(hdev, bdaddr, bdaddr_type, +						 rssi, data, len); +			return; +		} + +		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, +				  rssi, 0, 1, data, len, NULL, 0); +		return; +	} + +	/* Check if the pending report is for the same device as the new one */ +	match = (!bacmp(bdaddr, &d->last_adv_addr) && +		 bdaddr_type == d->last_adv_addr_type); + +	/* If the pending data doesn't match this report or this isn't a +	 * scan response (e.g. we got a duplicate ADV_IND) then force +	 * sending of the pending data. +	 */ +	if (type != LE_ADV_SCAN_RSP || !match) { +		/* Send out whatever is in the cache, but skip duplicates */ +		if (!match) +			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, +					  d->last_adv_addr_type, NULL, +					  d->last_adv_rssi, 0, 1, +					  d->last_adv_data, +					  d->last_adv_data_len, NULL, 0); + +		/* If the new report will trigger a SCAN_REQ store it for +		 * later merging. +		 */ +		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { +			store_pending_adv_report(hdev, bdaddr, bdaddr_type, +						 rssi, data, len); +			return; +		} + +		/* The advertising reports cannot be merged, so clear +		 * the pending report and send out a device found event. +		 */ +		clear_pending_adv_report(hdev); +		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, +				  rssi, 0, 1, data, len, NULL, 0); +		return; +	} + +	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and +	 * the new event is a SCAN_RSP. We can therefore proceed with +	 * sending a merged device found event. 
+	 */ +	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, +			  d->last_adv_addr_type, NULL, rssi, 0, 1, data, len, +			  d->last_adv_data, d->last_adv_data_len); +	clear_pending_adv_report(hdev); +} +  static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)  {  	u8 num_reports = skb->data[0];  	void *ptr = &skb->data[1]; -	s8 rssi; + +	hci_dev_lock(hdev);  	while (num_reports--) {  		struct hci_ev_le_advertising_info *ev = ptr; +		s8 rssi;  		rssi = ev->data[ev->length]; -		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, -				  NULL, rssi, 0, 1, ev->data, ev->length); +		process_adv_report(hdev, ev->evt_type, &ev->bdaddr, +				   ev->bdaddr_type, rssi, ev->data, ev->length);  		ptr += sizeof(*ev) + ev->length + 1;  	} + +	hci_dev_unlock(hdev);  }  static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -3549,7 +4219,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)  	if (conn == NULL)  		goto not_found; -	ltk = hci_find_ltk(hdev, ev->ediv, ev->random); +	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);  	if (ltk == NULL)  		goto not_found; @@ -3565,7 +4235,13 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)  	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); -	if (ltk->type & HCI_SMP_STK) { +	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a +	 * temporary key used to encrypt a connection following +	 * pairing. It is used during the Encrypted Session Setup to +	 * distribute the keys. Later, security can be re-established +	 * using a distributed LTK. +	 */ +	if (ltk->type == HCI_SMP_STK_SLAVE) {  		list_del(<k->list);  		kfree(ltk);  	} @@ -3640,8 +4316,8 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)  	skb_pull(skb, HCI_EVENT_HDR_SIZE);  	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) { -		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data; -		u16 opcode = __le16_to_cpu(hdr->opcode); +		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; +		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);  		hci_req_cmd_complete(hdev, opcode, 0);  	} diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 9bd7d959e38..80d25c150a6 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -66,6 +66,46 @@ static struct bt_sock_list hci_sk_list = {  	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)  }; +static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) +{ +	struct hci_filter *flt; +	int flt_type, flt_event; + +	/* Apply filter */ +	flt = &hci_pi(sk)->filter; + +	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) +		flt_type = 0; +	else +		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS; + +	if (!test_bit(flt_type, &flt->type_mask)) +		return true; + +	/* Extra filter for event packets only */ +	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT) +		return false; + +	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); + +	if (!hci_test_bit(flt_event, &flt->event_mask)) +		return true; + +	/* Check filter only when opcode is set */ +	if (!flt->opcode) +		return false; + +	if (flt_event == HCI_EV_CMD_COMPLETE && +	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3))) +		return true; + +	if (flt_event == HCI_EV_CMD_STATUS && +	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4))) +		return true; + +	return false; +} +  /* Send frame to RAW socket */  void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)  { @@ -77,7 +117,6 @@ void 
hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)  	read_lock(&hci_sk_list.lock);  	sk_for_each(sk, &hci_sk_list.head) { -		struct hci_filter *flt;  		struct sk_buff *nskb;  		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) @@ -87,36 +126,24 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)  		if (skb->sk == sk)  			continue; -		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) -			continue; - -		/* Apply filter */ -		flt = &hci_pi(sk)->filter; - -		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? -			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), -			      &flt->type_mask)) -			continue; - -		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { -			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); - -			if (!hci_test_bit(evt, &flt->event_mask)) +		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { +			if (is_filtered_packet(sk, skb))  				continue; - -			if (flt->opcode && -			    ((evt == HCI_EV_CMD_COMPLETE && -			      flt->opcode != -			      get_unaligned((__le16 *)(skb->data + 3))) || -			     (evt == HCI_EV_CMD_STATUS && -			      flt->opcode != -			      get_unaligned((__le16 *)(skb->data + 4))))) +		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { +			if (!bt_cb(skb)->incoming) +				continue; +			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT && +			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && +			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)  				continue; +		} else { +			/* Don't send frame to other channel types */ +			continue;  		}  		if (!skb_copy) {  			/* Create a private copy with headroom */ -			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC); +			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);  			if (!skb_copy)  				continue; @@ -184,22 +211,22 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)  	switch (bt_cb(skb)->pkt_type) {  	case HCI_COMMAND_PKT: -		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT); +		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);  		break;  	case HCI_EVENT_PKT: -		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT); +		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);  		break;  	case HCI_ACLDATA_PKT:  		if (bt_cb(skb)->incoming) -			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT); +			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);  		else -			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT); +			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);  		break;  	case HCI_SCODATA_PKT:  		if (bt_cb(skb)->incoming) -			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT); +			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);  		else -			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT); +			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);  		break;  	default:  		return; @@ -220,8 +247,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)  			struct hci_mon_hdr *hdr;  			/* Create a private copy with headroom */ -			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, -					       GFP_ATOMIC); +			skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, +						      GFP_ATOMIC, true);  			if (!skb_copy)  				continue; @@ -292,7 +319,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)  		bacpy(&ni->bdaddr, &hdev->bdaddr);  		memcpy(ni->name, hdev->name, 8); -		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX); +		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);  		break;  	case HCI_DEV_UNREG: @@ -300,7 +327,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)  		if (!skb)  			return NULL; -		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX); +		opcode = 
cpu_to_le16(HCI_MON_DEL_INDEX);  		break;  	default: @@ -360,7 +387,6 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)  	__net_timestamp(skb);  	bt_cb(skb)->pkt_type = HCI_EVENT_PKT; -	skb->dev = (void *) hdev;  	hci_send_to_sock(hdev, skb);  	kfree_skb(skb);  } @@ -426,6 +452,12 @@ static int hci_sock_release(struct socket *sock)  	bt_sock_unlink(&hci_sk_list, sk);  	if (hdev) { +		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { +			mgmt_index_added(hdev); +			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags); +			hci_dev_close(hdev->id); +		} +  		atomic_dec(&hdev->promisc);  		hci_dev_put(hdev);  	} @@ -449,7 +481,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)  	hci_dev_lock(hdev); -	err = hci_blacklist_add(hdev, &bdaddr, 0); +	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);  	hci_dev_unlock(hdev); @@ -466,7 +498,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)  	hci_dev_lock(hdev); -	err = hci_blacklist_del(hdev, &bdaddr, 0); +	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);  	hci_dev_unlock(hdev); @@ -482,20 +514,17 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,  	if (!hdev)  		return -EBADFD; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) +		return -EBUSY; + +	if (hdev->dev_type != HCI_BREDR) +		return -EOPNOTSUPP; +  	switch (cmd) {  	case HCISETRAW:  		if (!capable(CAP_NET_ADMIN))  			return -EPERM; - -		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) -			return -EPERM; - -		if (arg) -			set_bit(HCI_RAW, &hdev->flags); -		else -			clear_bit(HCI_RAW, &hdev->flags); - -		return 0; +		return -EOPNOTSUPP;  	case HCIGETCONNINFO:  		return hci_get_conn_info(hdev, (void __user *) arg); @@ -512,23 +541,29 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,  		if (!capable(CAP_NET_ADMIN))  			return -EPERM;  		return hci_sock_blacklist_del(hdev, (void __user *) arg); - -	default: -		if (hdev->ioctl) -			return hdev->ioctl(hdev, cmd, arg); -		return -EINVAL;  	} + +	return -ENOIOCTLCMD;  }  static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,  			  unsigned long arg)  { -	struct sock *sk = sock->sk;  	void __user *argp = (void __user *) arg; +	struct sock *sk = sock->sk;  	int err;  	BT_DBG("cmd %x arg %lx", cmd, arg); +	lock_sock(sk); + +	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { +		err = -EBADFD; +		goto done; +	} + +	release_sock(sk); +  	switch (cmd) {  	case HCIGETDEVLIST:  		return hci_get_dev_list(argp); @@ -573,13 +608,15 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,  	case HCIINQUIRY:  		return hci_inquiry(argp); - -	default: -		lock_sock(sk); -		err = hci_sock_bound_ioctl(sk, cmd, arg); -		release_sock(sk); -		return err;  	} + +	lock_sock(sk); + +	err = hci_sock_bound_ioctl(sk, cmd, arg); + +done: +	release_sock(sk); +	return err;  }  static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, @@ -629,6 +666,57 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,  		hci_pi(sk)->hdev = hdev;  		break; +	case HCI_CHANNEL_USER: +		if (hci_pi(sk)->hdev) { +			err = -EALREADY; +			goto done; +		} + +		if (haddr.hci_dev == HCI_DEV_NONE) { +			err = -EINVAL; +			goto done; +		} + +		if (!capable(CAP_NET_ADMIN)) { +			err = -EPERM; +			goto done; +		} + +		hdev = hci_dev_get(haddr.hci_dev); +		if (!hdev) { +			err = -ENODEV; +			goto done; +		} + +		if (test_bit(HCI_UP, &hdev->flags) || +		    test_bit(HCI_INIT, &hdev->flags) || +		    test_bit(HCI_SETUP, &hdev->dev_flags)) { 
+			err = -EBUSY; +			hci_dev_put(hdev); +			goto done; +		} + +		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +			err = -EUSERS; +			hci_dev_put(hdev); +			goto done; +		} + +		mgmt_index_removed(hdev); + +		err = hci_dev_open(hdev->id); +		if (err) { +			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags); +			mgmt_index_added(hdev); +			hci_dev_put(hdev); +			goto done; +		} + +		atomic_inc(&hdev->promisc); + +		hci_pi(sk)->hdev = hdev; +		break; +  	case HCI_CHANNEL_CONTROL:  		if (haddr.hci_dev != HCI_DEV_NONE) {  			err = -EINVAL; @@ -677,22 +765,30 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,  {  	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;  	struct sock *sk = sock->sk; -	struct hci_dev *hdev = hci_pi(sk)->hdev; +	struct hci_dev *hdev; +	int err = 0;  	BT_DBG("sock %p sk %p", sock, sk); -	if (!hdev) -		return -EBADFD; +	if (peer) +		return -EOPNOTSUPP;  	lock_sock(sk); +	hdev = hci_pi(sk)->hdev; +	if (!hdev) { +		err = -EBADFD; +		goto done; +	} +  	*addr_len = sizeof(*haddr);  	haddr->hci_family = AF_BLUETOOTH;  	haddr->hci_dev    = hdev->id; -	haddr->hci_channel= 0; +	haddr->hci_channel= hci_pi(sk)->channel; +done:  	release_sock(sk); -	return 0; +	return err;  }  static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, @@ -752,8 +848,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (!skb)  		return err; -	msg->msg_namelen = 0; -  	copied = skb->len;  	if (len < copied) {  		msg->msg_flags |= MSG_TRUNC; @@ -767,6 +861,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	case HCI_CHANNEL_RAW:  		hci_sock_cmsg(sk, msg, skb);  		break; +	case HCI_CHANNEL_USER:  	case HCI_CHANNEL_CONTROL:  	case HCI_CHANNEL_MONITOR:  		sock_recv_timestamp(msg, sk, skb); @@ -801,6 +896,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	switch (hci_pi(sk)->channel) {  	case HCI_CHANNEL_RAW: +	case HCI_CHANNEL_USER:  		break;  	case HCI_CHANNEL_CONTROL:  		err = mgmt_control(sk, msg, len); @@ -835,9 +931,23 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);  	skb_pull(skb, 1); -	skb->dev = (void *) hdev; -	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { +	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { +		/* No permission check is needed for user channel +		 * since that gets enforced when binding the socket. +		 * +		 * However check that the packet type is valid. 
+		 */ +		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && +		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && +		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { +			err = -EINVAL; +			goto drop; +		} + +		skb_queue_tail(&hdev->raw_q, skb); +		queue_work(hdev->workqueue, &hdev->tx_work); +	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {  		u16 opcode = get_unaligned_le16(skb->data);  		u16 ogf = hci_opcode_ogf(opcode);  		u16 ocf = hci_opcode_ocf(opcode); @@ -895,7 +1005,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,  	lock_sock(sk);  	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { -		err = -EINVAL; +		err = -EBADFD;  		goto done;  	} @@ -981,7 +1091,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,  	lock_sock(sk);  	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { -		err = -EINVAL; +		err = -EBADFD;  		goto done;  	} diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index edf623a2904..555982a78a5 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -1,17 +1,12 @@  /* Bluetooth HCI driver model support. */ -#include <linux/debugfs.h>  #include <linux/module.h> -#include <asm/unaligned.h>  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h>  static struct class *bt_class; -struct dentry *bt_debugfs; -EXPORT_SYMBOL_GPL(bt_debugfs); -  static inline char *link_typetostr(int type)  {  	switch (type) { @@ -42,40 +37,19 @@ static ssize_t show_link_address(struct device *dev,  	return sprintf(buf, "%pMR\n", &conn->dst);  } -static ssize_t show_link_features(struct device *dev, -				  struct device_attribute *attr, char *buf) -{ -	struct hci_conn *conn = to_hci_conn(dev); - -	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", -		       conn->features[0][0], conn->features[0][1], -		       conn->features[0][2], conn->features[0][3], -		       conn->features[0][4], conn->features[0][5], -		       conn->features[0][6], conn->features[0][7]); -} -  #define LINK_ATTR(_name, _mode, _show, _store) \  struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)  static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);  static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); -static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);  static struct attribute *bt_link_attrs[] = {  	&link_attr_type.attr,  	&link_attr_address.attr, -	&link_attr_features.attr,  	NULL  }; -static struct attribute_group bt_link_group = { -	.attrs = bt_link_attrs, -}; - -static const struct attribute_group *bt_link_groups[] = { -	&bt_link_group, -	NULL -}; +ATTRIBUTE_GROUPS(bt_link);  static void bt_link_release(struct device *dev)  { @@ -150,28 +124,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)  	hci_dev_put(hdev);  } -static inline char *host_bustostr(int bus) -{ -	switch (bus) { -	case HCI_VIRTUAL: -		return "VIRTUAL"; -	case HCI_USB: -		return "USB"; -	case HCI_PCCARD: -		return "PCCARD"; -	case HCI_UART: -		return "UART"; -	case HCI_RS232: -		return "RS232"; -	case HCI_PCI: -		return "PCI"; -	case HCI_SDIO: -		return "SDIO"; -	default: -		return "UNKNOWN"; -	} -} -  static inline char *host_typetostr(int type)  {  	switch (type) { @@ -184,13 +136,6 @@ static inline char *host_typetostr(int type)  	}  } -static ssize_t show_bus(struct device *dev, -			struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); -} -  static ssize_t show_type(struct device *dev,  			 struct 
device_attribute *attr, char *buf)  { @@ -212,14 +157,6 @@ static ssize_t show_name(struct device *dev,  	return sprintf(buf, "%s\n", name);  } -static ssize_t show_class(struct device *dev, -			  struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], -		       hdev->dev_class[1], hdev->dev_class[0]); -} -  static ssize_t show_address(struct device *dev,  			    struct device_attribute *attr, char *buf)  { @@ -227,161 +164,18 @@ static ssize_t show_address(struct device *dev,  	return sprintf(buf, "%pMR\n", &hdev->bdaddr);  } -static ssize_t show_features(struct device *dev, -			     struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); - -	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", -		       hdev->features[0][0], hdev->features[0][1], -		       hdev->features[0][2], hdev->features[0][3], -		       hdev->features[0][4], hdev->features[0][5], -		       hdev->features[0][6], hdev->features[0][7]); -} - -static ssize_t show_manufacturer(struct device *dev, -				 struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%d\n", hdev->manufacturer); -} - -static ssize_t show_hci_version(struct device *dev, -				struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%d\n", hdev->hci_ver); -} - -static ssize_t show_hci_revision(struct device *dev, -				 struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%d\n", hdev->hci_rev); -} - -static ssize_t show_idle_timeout(struct device *dev, -				 struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%d\n", hdev->idle_timeout); -} - -static ssize_t store_idle_timeout(struct device *dev, -				  struct device_attribute *attr, -				  const char *buf, size_t count) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	unsigned int val; -	int rv; - -	rv = kstrtouint(buf, 0, &val); -	if (rv < 0) -		return rv; - -	if (val != 0 && (val < 500 || val > 3600000)) -		return -EINVAL; - -	hdev->idle_timeout = val; - -	return count; -} - -static ssize_t show_sniff_max_interval(struct device *dev, -				       struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%d\n", hdev->sniff_max_interval); -} - -static ssize_t store_sniff_max_interval(struct device *dev, -					struct device_attribute *attr, -					const char *buf, size_t count) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	u16 val; -	int rv; - -	rv = kstrtou16(buf, 0, &val); -	if (rv < 0) -		return rv; - -	if (val == 0 || val % 2 || val < hdev->sniff_min_interval) -		return -EINVAL; - -	hdev->sniff_max_interval = val; - -	return count; -} - -static ssize_t show_sniff_min_interval(struct device *dev, -				       struct device_attribute *attr, char *buf) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	return sprintf(buf, "%d\n", hdev->sniff_min_interval); -} - -static ssize_t store_sniff_min_interval(struct device *dev, -					struct device_attribute *attr, -					const char *buf, size_t count) -{ -	struct hci_dev *hdev = to_hci_dev(dev); -	u16 val; -	int rv; - -	rv = kstrtou16(buf, 0, &val); -	if (rv < 0) -		return rv; - -	if (val == 0 || val % 2 || val > hdev->sniff_max_interval) -		return -EINVAL; - -	hdev->sniff_min_interval = val; - -	return count; -} - -static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);  
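The sysfs boilerplate removed in this file is taken over by the ATTRIBUTE_GROUPS() helper (ATTRIBUTE_GROUPS(bt_link) above, with the bt_host equivalent below). Roughly, the macro stands in for the structures that used to be written out by hand, along the lines of this simplified sketch:

static const struct attribute_group bt_link_group = {
	.attrs = bt_link_attrs,
};

static const struct attribute_group *bt_link_groups[] = {
	&bt_link_group,
	NULL
};

The generated *_groups array is what the device type's .groups pointer consumes, so dropping the hand-rolled versions should not change what is exposed under sysfs for the attributes that remain.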
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);  static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); -static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);  static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); -static DEVICE_ATTR(features, S_IRUGO, show_features, NULL); -static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); -static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); -static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); - -static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, -		   show_idle_timeout, store_idle_timeout); -static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, -		   show_sniff_max_interval, store_sniff_max_interval); -static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, -		   show_sniff_min_interval, store_sniff_min_interval);  static struct attribute *bt_host_attrs[] = { -	&dev_attr_bus.attr,  	&dev_attr_type.attr,  	&dev_attr_name.attr, -	&dev_attr_class.attr,  	&dev_attr_address.attr, -	&dev_attr_features.attr, -	&dev_attr_manufacturer.attr, -	&dev_attr_hci_version.attr, -	&dev_attr_hci_revision.attr, -	&dev_attr_idle_timeout.attr, -	&dev_attr_sniff_max_interval.attr, -	&dev_attr_sniff_min_interval.attr,  	NULL  }; -static struct attribute_group bt_host_group = { -	.attrs = bt_host_attrs, -}; - -static const struct attribute_group *bt_host_groups[] = { -	&bt_host_group, -	NULL -}; +ATTRIBUTE_GROUPS(bt_host);  static void bt_host_release(struct device *dev)  { @@ -396,141 +190,6 @@ static struct device_type bt_host = {  	.release = bt_host_release,  }; -static int inquiry_cache_show(struct seq_file *f, void *p) -{ -	struct hci_dev *hdev = f->private; -	struct discovery_state *cache = &hdev->discovery; -	struct inquiry_entry *e; - -	hci_dev_lock(hdev); - -	list_for_each_entry(e, &cache->all, all) { -		struct inquiry_data *data = &e->data; -		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", -			   &data->bdaddr, -			   data->pscan_rep_mode, data->pscan_period_mode, -			   data->pscan_mode, data->dev_class[2], -			   data->dev_class[1], data->dev_class[0], -			   __le16_to_cpu(data->clock_offset), -			   data->rssi, data->ssp_mode, e->timestamp); -	} - -	hci_dev_unlock(hdev); - -	return 0; -} - -static int inquiry_cache_open(struct inode *inode, struct file *file) -{ -	return single_open(file, inquiry_cache_show, inode->i_private); -} - -static const struct file_operations inquiry_cache_fops = { -	.open		= inquiry_cache_open, -	.read		= seq_read, -	.llseek		= seq_lseek, -	.release	= single_release, -}; - -static int blacklist_show(struct seq_file *f, void *p) -{ -	struct hci_dev *hdev = f->private; -	struct bdaddr_list *b; - -	hci_dev_lock(hdev); - -	list_for_each_entry(b, &hdev->blacklist, list) -		seq_printf(f, "%pMR\n", &b->bdaddr); - -	hci_dev_unlock(hdev); - -	return 0; -} - -static int blacklist_open(struct inode *inode, struct file *file) -{ -	return single_open(file, blacklist_show, inode->i_private); -} - -static const struct file_operations blacklist_fops = { -	.open		= blacklist_open, -	.read		= seq_read, -	.llseek		= seq_lseek, -	.release	= single_release, -}; - -static void print_bt_uuid(struct seq_file *f, u8 *uuid) -{ -	u32 data0, data5; -	u16 data1, data2, data3, data4; - -	data5 = get_unaligned_le32(uuid); -	data4 = get_unaligned_le16(uuid + 4); -	data3 = get_unaligned_le16(uuid + 6); -	data2 = get_unaligned_le16(uuid + 8); -	data1 = get_unaligned_le16(uuid + 10); -	data0 = get_unaligned_le32(uuid + 12); - -	seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n", -		   data0, data1, 
data2, data3, data4, data5); -} - -static int uuids_show(struct seq_file *f, void *p) -{ -	struct hci_dev *hdev = f->private; -	struct bt_uuid *uuid; - -	hci_dev_lock(hdev); - -	list_for_each_entry(uuid, &hdev->uuids, list) -		print_bt_uuid(f, uuid->uuid); - -	hci_dev_unlock(hdev); - -	return 0; -} - -static int uuids_open(struct inode *inode, struct file *file) -{ -	return single_open(file, uuids_show, inode->i_private); -} - -static const struct file_operations uuids_fops = { -	.open		= uuids_open, -	.read		= seq_read, -	.llseek		= seq_lseek, -	.release	= single_release, -}; - -static int auto_accept_delay_set(void *data, u64 val) -{ -	struct hci_dev *hdev = data; - -	hci_dev_lock(hdev); - -	hdev->auto_accept_delay = val; - -	hci_dev_unlock(hdev); - -	return 0; -} - -static int auto_accept_delay_get(void *data, u64 *val) -{ -	struct hci_dev *hdev = data; - -	hci_dev_lock(hdev); - -	*val = hdev->auto_accept_delay; - -	hci_dev_unlock(hdev); - -	return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, -			auto_accept_delay_set, "%llu\n"); -  void hci_init_sysfs(struct hci_dev *hdev)  {  	struct device *dev = &hdev->dev; @@ -542,52 +201,8 @@ void hci_init_sysfs(struct hci_dev *hdev)  	device_initialize(dev);  } -int hci_add_sysfs(struct hci_dev *hdev) -{ -	struct device *dev = &hdev->dev; -	int err; - -	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - -	dev_set_name(dev, "%s", hdev->name); - -	err = device_add(dev); -	if (err < 0) -		return err; - -	if (!bt_debugfs) -		return 0; - -	hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); -	if (!hdev->debugfs) -		return 0; - -	debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, -			    hdev, &inquiry_cache_fops); - -	debugfs_create_file("blacklist", 0444, hdev->debugfs, -			    hdev, &blacklist_fops); - -	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); - -	debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, -			    &auto_accept_delay_fops); -	return 0; -} - -void hci_del_sysfs(struct hci_dev *hdev) -{ -	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - -	debugfs_remove_recursive(hdev->debugfs); - -	device_del(&hdev->dev); -} -  int __init bt_sysfs_init(void)  { -	bt_debugfs = debugfs_create_dir("bluetooth", NULL); -  	bt_class = class_create(THIS_MODULE, "bluetooth");  	return PTR_ERR_OR_ZERO(bt_class); @@ -596,6 +211,4 @@ int __init bt_sysfs_init(void)  void bt_sysfs_cleanup(void)  {  	class_destroy(bt_class); - -	debugfs_remove_recursive(bt_debugfs);  } diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index bdc35a7a7fe..8181ea4bc2f 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -223,51 +223,6 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)  	input_sync(dev);  } -static int hidp_send_report(struct hidp_session *session, struct hid_report *report) -{ -	unsigned char hdr; -	u8 *buf; -	int rsize, ret; - -	buf = hid_alloc_report_buf(report, GFP_ATOMIC); -	if (!buf) -		return -EIO; - -	hid_output_report(report, buf); -	hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; - -	rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0); -	ret = hidp_send_intr_message(session, hdr, buf, rsize); - -	kfree(buf); -	return ret; -} - -static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, -			       unsigned int code, int value) -{ -	struct hid_device *hid = input_get_drvdata(dev); -	struct hidp_session *session = hid->driver_data; -	struct hid_field *field; -	int 
offset; - -	BT_DBG("session %p type %d code %d value %d", -	       session, type, code, value); - -	if (type != EV_LED) -		return -1; - -	offset = hidinput_find_field(hid, type, code, &field); -	if (offset == -1) { -		hid_warn(dev, "event field not found\n"); -		return -1; -	} - -	hid_set_field(field, offset, value); - -	return hidp_send_report(session, field->report); -} -  static int hidp_get_raw_report(struct hid_device *hid,  		unsigned char report_number,  		unsigned char *data, size_t count, @@ -353,17 +308,24 @@ err:  	return ret;  } -static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, -		unsigned char report_type) +static int hidp_set_raw_report(struct hid_device *hid, unsigned char reportnum, +			       unsigned char *data, size_t count, +			       unsigned char report_type)  {  	struct hidp_session *session = hid->driver_data;  	int ret; -	if (report_type == HID_OUTPUT_REPORT) { -		report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; -		return hidp_send_intr_message(session, report_type, -					      data, count); -	} else if (report_type != HID_FEATURE_REPORT) { +	switch (report_type) { +	case HID_FEATURE_REPORT: +		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; +		break; +	case HID_INPUT_REPORT: +		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_INPUT; +		break; +	case HID_OUTPUT_REPORT: +		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT; +		break; +	default:  		return -EINVAL;  	} @@ -371,8 +333,8 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s  		return -ERESTARTSYS;  	/* Set up our wait, and send the report request to the device. */ +	data[0] = reportnum;  	set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); -	report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;  	ret = hidp_send_ctrl_message(session, report_type, data, count);  	if (ret)  		goto err; @@ -411,6 +373,29 @@ err:  	return ret;  } +static int hidp_output_report(struct hid_device *hid, __u8 *data, size_t count) +{ +	struct hidp_session *session = hid->driver_data; + +	return hidp_send_intr_message(session, +				      HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT, +				      data, count); +} + +static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum, +			    __u8 *buf, size_t len, unsigned char rtype, +			    int reqtype) +{ +	switch (reqtype) { +	case HID_REQ_GET_REPORT: +		return hidp_get_raw_report(hid, reportnum, buf, len, rtype); +	case HID_REQ_SET_REPORT: +		return hidp_set_raw_report(hid, reportnum, buf, len, rtype); +	default: +		return -EIO; +	} +} +  static void hidp_idle_timeout(unsigned long arg)  {  	struct hidp_session *session = (struct hidp_session *) arg; @@ -430,6 +415,16 @@ static void hidp_del_timer(struct hidp_session *session)  		del_timer(&session->timer);  } +static void hidp_process_report(struct hidp_session *session, +				int type, const u8 *data, int len, int intr) +{ +	if (len > HID_MAX_BUFFER_SIZE) +		len = HID_MAX_BUFFER_SIZE; + +	memcpy(session->input_buf, data, len); +	hid_input_report(session->hid, type, session->input_buf, len, intr); +} +  static void hidp_process_handshake(struct hidp_session *session,  					unsigned char param)  { @@ -502,7 +497,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,  			hidp_input_report(session, skb);  		if (session->hid) -			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); +			hidp_process_report(session, HID_INPUT_REPORT, +					    skb->data, skb->len, 
0);  		break;  	case HIDP_DATA_RTYPE_OTHER: @@ -584,7 +580,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,  			hidp_input_report(session, skb);  		if (session->hid) { -			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); +			hidp_process_report(session, HID_INPUT_REPORT, +					    skb->data, skb->len, 1);  			BT_DBG("report len %d", skb->len);  		}  	} else { @@ -727,7 +724,8 @@ static struct hid_ll_driver hidp_hid_driver = {  	.stop = hidp_stop,  	.open  = hidp_open,  	.close = hidp_close, -	.hidinput_input_event = hidp_hidinput_event, +	.raw_request = hidp_raw_request, +	.output_report = hidp_output_report,  };  /* This function sets up the hid device. It does not add it @@ -767,17 +765,17 @@ static int hidp_setup_hid(struct hidp_session *session,  	strncpy(hid->name, req->name, sizeof(req->name) - 1);  	snprintf(hid->phys, sizeof(hid->phys), "%pMR", -		 &bt_sk(session->ctrl_sock->sk)->src); +		 &l2cap_pi(session->ctrl_sock->sk)->chan->src); +	/* NOTE: Some device modules depend on the dst address being stored in +	 * uniq. Please be aware of this before making changes to this behavior. +	 */  	snprintf(hid->uniq, sizeof(hid->uniq), "%pMR", -		 &bt_sk(session->ctrl_sock->sk)->dst); +		 &l2cap_pi(session->ctrl_sock->sk)->chan->dst);  	hid->dev.parent = &session->conn->hcon->dev;  	hid->ll_driver = &hidp_hid_driver; -	hid->hid_get_raw_report = hidp_get_raw_report; -	hid->hid_output_raw_report = hidp_output_raw_report; -  	/* True if device is blacklisted in drivers/hid/hid-core.c */  	if (hid_ignore(hid)) {  		hid_destroy_device(session->hid); @@ -1283,23 +1281,29 @@ static int hidp_session_thread(void *arg)  static int hidp_verify_sockets(struct socket *ctrl_sock,  			       struct socket *intr_sock)  { +	struct l2cap_chan *ctrl_chan, *intr_chan;  	struct bt_sock *ctrl, *intr;  	struct hidp_session *session;  	if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))  		return -EINVAL; +	ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan; +	intr_chan = l2cap_pi(intr_sock->sk)->chan; + +	if (bacmp(&ctrl_chan->src, &intr_chan->src) || +	    bacmp(&ctrl_chan->dst, &intr_chan->dst)) +		return -ENOTUNIQ; +  	ctrl = bt_sk(ctrl_sock->sk);  	intr = bt_sk(intr_sock->sk); -	if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst)) -		return -ENOTUNIQ;  	if (ctrl->sk.sk_state != BT_CONNECTED ||  	    intr->sk.sk_state != BT_CONNECTED)  		return -EBADFD;  	/* early session check, we check again during session registration */ -	session = hidp_session_find(&ctrl->dst); +	session = hidp_session_find(&ctrl_chan->dst);  	if (session) {  		hidp_session_put(session);  		return -EEXIST; @@ -1332,7 +1336,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,  	if (!conn)  		return -EBADFD; -	ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock, +	ret = hidp_session_new(&session, &chan->dst, ctrl_sock,  			       intr_sock, req, conn);  	if (ret)  		goto out_conn; diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index 9e6cc355310..8798492a6e9 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h @@ -24,6 +24,7 @@  #define __HIDP_H  #include <linux/types.h> +#include <linux/hid.h>  #include <linux/kref.h>  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/l2cap.h> @@ -179,10 +180,13 @@ struct hidp_session {  	/* Used in hidp_output_raw_report() */  	int output_report_success; /* boolean */ + +	/* temporary input buffer */ +	u8 input_buf[HID_MAX_BUFFER_SIZE];  };  /* HIDP init defines */ 
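The input_buf member added to struct hidp_session above backs hidp_process_report(): incoming report payloads are clamped to HID_MAX_BUFFER_SIZE and copied out of the skb into this per-session buffer before being handed to hid_input_report(). Below is a standalone sketch of that bounded-copy pattern; the buffer size value, struct and function names are stand-ins for the kernel ones.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define REPORT_BUF_SIZE 4096	/* stand-in for HID_MAX_BUFFER_SIZE */

struct report_session {
	uint8_t input_buf[REPORT_BUF_SIZE];
};

/* Copy at most REPORT_BUF_SIZE bytes of wire data into the session
 * buffer and return the length that was actually stored.
 */
size_t store_report(struct report_session *s, const uint8_t *data, size_t len)
{
	if (len > REPORT_BUF_SIZE)
		len = REPORT_BUF_SIZE;

	memcpy(s->input_buf, data, len);
	return len;
}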
-extern int __init hidp_init_sockets(void); -extern void __exit hidp_cleanup_sockets(void); +int __init hidp_init_sockets(void); +void __exit hidp_cleanup_sockets(void);  #endif /* __HIDP_H */ diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 63fa11109a1..323f23cd2c3 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -36,18 +36,25 @@  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h>  #include <net/bluetooth/l2cap.h> -#include <net/bluetooth/smp.h> -#include <net/bluetooth/a2mp.h> -#include <net/bluetooth/amp.h> + +#include "smp.h" +#include "a2mp.h" +#include "amp.h" +#include "6lowpan.h" + +#define LE_FLOWCTL_MAX_CREDITS 65535  bool disable_ertm; -static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; -static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, }; +static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; +static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };  static LIST_HEAD(chan_list);  static DEFINE_RWLOCK(chan_list_lock); +static u16 le_max_credits = L2CAP_LE_MAX_CREDITS; +static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS; +  static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,  				       u8 code, u8 ident, u16 dlen, void *data);  static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, @@ -58,6 +65,18 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);  static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,  		     struct sk_buff_head *skbs, u8 event); +static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type) +{ +	if (hcon->type == LE_LINK) { +		if (type == ADDR_LE_DEV_PUBLIC) +			return BDADDR_LE_PUBLIC; +		else +			return BDADDR_LE_RANDOM; +	} + +	return BDADDR_BREDR; +} +  /* ---- L2CAP channels ---- */  static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, @@ -148,7 +167,7 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)  	struct l2cap_chan *c;  	list_for_each_entry(c, &chan_list, global_l) { -		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src)) +		if (c->sport == psm && !bacmp(&c->src, src))  			return c;  	}  	return NULL; @@ -200,9 +219,14 @@ int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)  static u16 l2cap_alloc_cid(struct l2cap_conn *conn)  { -	u16 cid = L2CAP_CID_DYN_START; +	u16 cid, dyn_end; + +	if (conn->hcon->type == LE_LINK) +		dyn_end = L2CAP_CID_LE_DYN_END; +	else +		dyn_end = L2CAP_CID_DYN_END; -	for (; cid < L2CAP_CID_DYN_END; cid++) { +	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {  		if (!__l2cap_get_chan_by_scid(conn, cid))  			return cid;  	} @@ -210,38 +234,25 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)  	return 0;  } -static void __l2cap_state_change(struct l2cap_chan *chan, int state) +static void l2cap_state_change(struct l2cap_chan *chan, int state)  {  	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),  	       state_to_string(state));  	chan->state = state; -	chan->ops->state_change(chan, state); +	chan->ops->state_change(chan, state, 0);  } -static void l2cap_state_change(struct l2cap_chan *chan, int state) +static inline void l2cap_state_change_and_error(struct l2cap_chan *chan, +						int state, int err)  { -	struct sock *sk = chan->sk; - -	lock_sock(sk); -	__l2cap_state_change(chan, state); -	release_sock(sk); -} - -static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err) -{ -	struct sock *sk = chan->sk; - -	sk->sk_err = err; +	chan->state = 
state; +	chan->ops->state_change(chan, chan->state, err);  }  static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)  { -	struct sock *sk = chan->sk; - -	lock_sock(sk); -	__l2cap_chan_set_err(chan, err); -	release_sock(sk); +	chan->ops->state_change(chan, chan->state, err);  }  static void __set_retrans_timer(struct l2cap_chan *chan) @@ -321,44 +332,20 @@ static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,  	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;  } -static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq) +static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)  { +	u16 seq = seq_list->head;  	u16 mask = seq_list->mask; -	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) { -		/* In case someone tries to pop the head of an empty list */ -		return L2CAP_SEQ_LIST_CLEAR; -	} else if (seq_list->head == seq) { -		/* Head can be removed in constant time */ -		seq_list->head = seq_list->list[seq & mask]; -		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; - -		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { -			seq_list->head = L2CAP_SEQ_LIST_CLEAR; -			seq_list->tail = L2CAP_SEQ_LIST_CLEAR; -		} -	} else { -		/* Walk the list to find the sequence number */ -		u16 prev = seq_list->head; -		while (seq_list->list[prev & mask] != seq) { -			prev = seq_list->list[prev & mask]; -			if (prev == L2CAP_SEQ_LIST_TAIL) -				return L2CAP_SEQ_LIST_CLEAR; -		} +	seq_list->head = seq_list->list[seq & mask]; +	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; -		/* Unlink the number from the list and clear it */ -		seq_list->list[prev & mask] = seq_list->list[seq & mask]; -		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; -		if (seq_list->tail == seq) -			seq_list->tail = prev; +	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { +		seq_list->head = L2CAP_SEQ_LIST_CLEAR; +		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;  	} -	return seq; -} -static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) -{ -	/* Remove the head in constant time */ -	return l2cap_seq_list_remove(seq_list, seq_list->head); +	return seq;  }  static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) @@ -484,12 +471,30 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)  	chan->max_tx = L2CAP_DEFAULT_MAX_TX;  	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;  	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; +	chan->remote_max_tx = chan->max_tx; +	chan->remote_tx_win = chan->tx_win;  	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;  	chan->sec_level = BT_SECURITY_LOW; +	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; +	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; +	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; +	chan->conf_state = 0;  	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);  } +static void l2cap_le_flowctl_init(struct l2cap_chan *chan) +{ +	chan->sdu = NULL; +	chan->sdu_last_frag = NULL; +	chan->sdu_len = 0; +	chan->tx_credits = 0; +	chan->rx_credits = le_max_credits; +	chan->mps = min_t(u16, chan->imtu, le_default_mps); + +	skb_queue_head_init(&chan->tx_q); +} +  void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)  {  	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, @@ -501,18 +506,10 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)  	switch (chan->chan_type) {  	case L2CAP_CHAN_CONN_ORIENTED: -		if (conn->hcon->type == LE_LINK) { -			/* LE connection */ -			chan->omtu = L2CAP_DEFAULT_MTU; -			if (chan->dcid == L2CAP_CID_ATT) -				chan->scid = L2CAP_CID_ATT; -			else -				chan->scid = 
l2cap_alloc_cid(conn); -		} else { -			/* Alloc CID for connection-oriented socket */ -			chan->scid = l2cap_alloc_cid(conn); +		/* Alloc CID for connection-oriented socket */ +		chan->scid = l2cap_alloc_cid(conn); +		if (conn->hcon->type == ACL_LINK)  			chan->omtu = L2CAP_DEFAULT_MTU; -		}  		break;  	case L2CAP_CHAN_CONN_LESS: @@ -522,11 +519,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)  		chan->omtu = L2CAP_DEFAULT_MTU;  		break; -	case L2CAP_CHAN_CONN_FIX_A2MP: -		chan->scid = L2CAP_CID_A2MP; -		chan->dcid = L2CAP_CID_A2MP; -		chan->omtu = L2CAP_A2MP_DEFAULT_MTU; -		chan->imtu = L2CAP_A2MP_DEFAULT_MTU; +	case L2CAP_CHAN_FIXED: +		/* Caller will set CID and CID specific MTU values */  		break;  	default: @@ -574,7 +568,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)  		chan->conn = NULL; -		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) +		if (chan->scid != L2CAP_CID_A2MP)  			hci_conn_drop(conn->hcon);  		if (mgr && mgr->bredr_chan == chan) @@ -597,6 +591,10 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)  	case L2CAP_MODE_BASIC:  		break; +	case L2CAP_MODE_LE_FLOWCTL: +		skb_queue_purge(&chan->tx_q); +		break; +  	case L2CAP_MODE_ERTM:  		__clear_retrans_timer(chan);  		__clear_monitor_timer(chan); @@ -617,13 +615,72 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)  	return;  } +void l2cap_conn_update_id_addr(struct hci_conn *hcon) +{ +	struct l2cap_conn *conn = hcon->l2cap_data; +	struct l2cap_chan *chan; + +	mutex_lock(&conn->chan_lock); + +	list_for_each_entry(chan, &conn->chan_l, list) { +		l2cap_chan_lock(chan); +		bacpy(&chan->dst, &hcon->dst); +		chan->dst_type = bdaddr_type(hcon, hcon->dst_type); +		l2cap_chan_unlock(chan); +	} + +	mutex_unlock(&conn->chan_lock); +} + +static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) +{ +	struct l2cap_conn *conn = chan->conn; +	struct l2cap_le_conn_rsp rsp; +	u16 result; + +	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) +		result = L2CAP_CR_AUTHORIZATION; +	else +		result = L2CAP_CR_BAD_PSM; + +	l2cap_state_change(chan, BT_DISCONN); + +	rsp.dcid    = cpu_to_le16(chan->scid); +	rsp.mtu     = cpu_to_le16(chan->imtu); +	rsp.mps     = cpu_to_le16(chan->mps); +	rsp.credits = cpu_to_le16(chan->rx_credits); +	rsp.result  = cpu_to_le16(result); + +	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), +		       &rsp); +} + +static void l2cap_chan_connect_reject(struct l2cap_chan *chan) +{ +	struct l2cap_conn *conn = chan->conn; +	struct l2cap_conn_rsp rsp; +	u16 result; + +	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) +		result = L2CAP_CR_SEC_BLOCK; +	else +		result = L2CAP_CR_BAD_PSM; + +	l2cap_state_change(chan, BT_DISCONN); + +	rsp.scid   = cpu_to_le16(chan->dcid); +	rsp.dcid   = cpu_to_le16(chan->scid); +	rsp.result = cpu_to_le16(result); +	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + +	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); +} +  void l2cap_chan_close(struct l2cap_chan *chan, int reason)  {  	struct l2cap_conn *conn = chan->conn; -	struct sock *sk = chan->sk; -	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state), -	       sk); +	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));  	switch (chan->state) {  	case BT_LISTEN: @@ -632,32 +689,19 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)  	case BT_CONNECTED:  	case BT_CONFIG: -		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && -		    conn->hcon->type == ACL_LINK) { -			__set_chan_timer(chan, sk->sk_sndtimeo); +		if 
(chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { +			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));  			l2cap_send_disconn_req(chan, reason);  		} else  			l2cap_chan_del(chan, reason);  		break;  	case BT_CONNECT2: -		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && -		    conn->hcon->type == ACL_LINK) { -			struct l2cap_conn_rsp rsp; -			__u16 result; - -			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) -				result = L2CAP_CR_SEC_BLOCK; -			else -				result = L2CAP_CR_BAD_PSM; -			l2cap_state_change(chan, BT_DISCONN); - -			rsp.scid   = cpu_to_le16(chan->dcid); -			rsp.dcid   = cpu_to_le16(chan->scid); -			rsp.result = cpu_to_le16(result); -			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); -			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, -				       sizeof(rsp), &rsp); +		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { +			if (conn->hcon->type == ACL_LINK) +				l2cap_chan_connect_reject(chan); +			else if (conn->hcon->type == LE_LINK) +				l2cap_chan_le_connect_reject(chan);  		}  		l2cap_chan_del(chan, reason); @@ -676,32 +720,52 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)  static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)  { -	if (chan->chan_type == L2CAP_CHAN_RAW) { +	switch (chan->chan_type) { +	case L2CAP_CHAN_RAW:  		switch (chan->sec_level) {  		case BT_SECURITY_HIGH: +		case BT_SECURITY_FIPS:  			return HCI_AT_DEDICATED_BONDING_MITM;  		case BT_SECURITY_MEDIUM:  			return HCI_AT_DEDICATED_BONDING;  		default:  			return HCI_AT_NO_BONDING;  		} -	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) { -		if (chan->sec_level == BT_SECURITY_LOW) -			chan->sec_level = BT_SECURITY_SDP; - -		if (chan->sec_level == BT_SECURITY_HIGH) +		break; +	case L2CAP_CHAN_CONN_LESS: +		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) { +			if (chan->sec_level == BT_SECURITY_LOW) +				chan->sec_level = BT_SECURITY_SDP; +		} +		if (chan->sec_level == BT_SECURITY_HIGH || +		    chan->sec_level == BT_SECURITY_FIPS)  			return HCI_AT_NO_BONDING_MITM;  		else  			return HCI_AT_NO_BONDING; -	} else { +		break; +	case L2CAP_CHAN_CONN_ORIENTED: +		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) { +			if (chan->sec_level == BT_SECURITY_LOW) +				chan->sec_level = BT_SECURITY_SDP; + +			if (chan->sec_level == BT_SECURITY_HIGH || +			    chan->sec_level == BT_SECURITY_FIPS) +				return HCI_AT_NO_BONDING_MITM; +			else +				return HCI_AT_NO_BONDING; +		} +		/* fall through */ +	default:  		switch (chan->sec_level) {  		case BT_SECURITY_HIGH: +		case BT_SECURITY_FIPS:  			return HCI_AT_GENERAL_BONDING_MITM;  		case BT_SECURITY_MEDIUM:  			return HCI_AT_GENERAL_BONDING;  		default:  			return HCI_AT_NO_BONDING;  		} +		break;  	}  } @@ -711,6 +775,9 @@ int l2cap_chan_check_security(struct l2cap_chan *chan)  	struct l2cap_conn *conn = chan->conn;  	__u8 auth_type; +	if (conn->hcon->type == LE_LINK) +		return smp_conn_security(conn->hcon, chan->sec_level); +  	auth_type = l2cap_get_auth_type(chan);  	return hci_conn_security(conn->hcon, chan->sec_level, auth_type); @@ -1015,14 +1082,29 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)  static bool __amp_capable(struct l2cap_chan *chan)  {  	struct l2cap_conn *conn = chan->conn; +	struct hci_dev *hdev; +	bool amp_available = false; -	if (enable_hs && -	    hci_amp_capable() && -	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED && -	    conn->fixed_chan_mask & L2CAP_FC_A2MP) -		return true; -	else +	if (!conn->hs_enabled)  		return false; + +	if (!(conn->fixed_chan_mask & 
L2CAP_FC_A2MP)) +		return false; + +	read_lock(&hci_dev_list_lock); +	list_for_each_entry(hdev, &hci_dev_list, list) { +		if (hdev->amp_type != AMP_TYPE_BREDR && +		    test_bit(HCI_UP, &hdev->flags)) { +			amp_available = true; +			break; +		} +	} +	read_unlock(&hci_dev_list_lock); + +	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED) +		return amp_available; + +	return false;  }  static bool l2cap_check_efs(struct l2cap_chan *chan) @@ -1122,16 +1204,57 @@ static void l2cap_chan_ready(struct l2cap_chan *chan)  	chan->conf_state = 0;  	__clear_chan_timer(chan); +	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits) +		chan->ops->suspend(chan); +  	chan->state = BT_CONNECTED;  	chan->ops->ready(chan);  } +static void l2cap_le_connect(struct l2cap_chan *chan) +{ +	struct l2cap_conn *conn = chan->conn; +	struct l2cap_le_conn_req req; + +	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) +		return; + +	req.psm     = chan->psm; +	req.scid    = cpu_to_le16(chan->scid); +	req.mtu     = cpu_to_le16(chan->imtu); +	req.mps     = cpu_to_le16(chan->mps); +	req.credits = cpu_to_le16(chan->rx_credits); + +	chan->ident = l2cap_get_ident(conn); + +	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, +		       sizeof(req), &req); +} + +static void l2cap_le_start(struct l2cap_chan *chan) +{ +	struct l2cap_conn *conn = chan->conn; + +	if (!smp_conn_security(conn->hcon, chan->sec_level)) +		return; + +	if (!chan->psm) { +		l2cap_chan_ready(chan); +		return; +	} + +	if (chan->state == BT_CONNECT) +		l2cap_le_connect(chan); +} +  static void l2cap_start_connection(struct l2cap_chan *chan)  {  	if (__amp_capable(chan)) {  		BT_DBG("chan %p AMP capable: discover AMPs", chan);  		a2mp_discover_amp(chan); +	} else if (chan->conn->hcon->type == LE_LINK) { +		l2cap_le_start(chan);  	} else {  		l2cap_send_conn_req(chan);  	} @@ -1142,7 +1265,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)  	struct l2cap_conn *conn = chan->conn;  	if (conn->hcon->type == LE_LINK) { -		l2cap_chan_ready(chan); +		l2cap_le_start(chan);  		return;  	} @@ -1156,7 +1279,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)  		}  	} else {  		struct l2cap_info_req req; -		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); +		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);  		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;  		conn->info_ident = l2cap_get_ident(conn); @@ -1186,7 +1309,6 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)  static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)  { -	struct sock *sk = chan->sk;  	struct l2cap_conn *conn = chan->conn;  	struct l2cap_disconn_req req; @@ -1199,7 +1321,7 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)  		__clear_ack_timer(chan);  	} -	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { +	if (chan->scid == L2CAP_CID_A2MP) {  		l2cap_state_change(chan, BT_DISCONN);  		return;  	} @@ -1209,10 +1331,7 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)  	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,  		       sizeof(req), &req); -	lock_sock(sk); -	__l2cap_state_change(chan, BT_DISCONN); -	__l2cap_chan_set_err(chan, err); -	release_sock(sk); +	l2cap_state_change_and_error(chan, BT_DISCONN, err);  }  /* ---- L2CAP connections ---- */ @@ -1225,8 +1344,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)  	mutex_lock(&conn->chan_lock);  	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { -		struct sock *sk = chan->sk; -  		l2cap_chan_lock(chan);  
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { @@ -1258,22 +1375,19 @@ static void l2cap_conn_start(struct l2cap_conn *conn)  			rsp.dcid = cpu_to_le16(chan->scid);  			if (l2cap_chan_check_security(chan)) { -				lock_sock(sk); -				if (test_bit(BT_SK_DEFER_SETUP, -					     &bt_sk(sk)->flags)) { -					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); -					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); +				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { +					rsp.result = cpu_to_le16(L2CAP_CR_PEND); +					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);  					chan->ops->defer(chan);  				} else { -					__l2cap_state_change(chan, BT_CONFIG); -					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); -					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); +					l2cap_state_change(chan, BT_CONFIG); +					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); +					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);  				} -				release_sock(sk);  			} else { -				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); -				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND); +				rsp.result = cpu_to_le16(L2CAP_CR_PEND); +				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);  			}  			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, @@ -1309,8 +1423,6 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,  	read_lock(&chan_list_lock);  	list_for_each_entry(c, &chan_list, global_l) { -		struct sock *sk = c->sk; -  		if (state && c->state != state)  			continue; @@ -1319,16 +1431,16 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,  			int src_any, dst_any;  			/* Exact match. */ -			src_match = !bacmp(&bt_sk(sk)->src, src); -			dst_match = !bacmp(&bt_sk(sk)->dst, dst); +			src_match = !bacmp(&c->src, src); +			dst_match = !bacmp(&c->dst, dst);  			if (src_match && dst_match) {  				read_unlock(&chan_list_lock);  				return c;  			}  			/* Closest match */ -			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY); -			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY); +			src_any = !bacmp(&c->src, BDADDR_ANY); +			dst_any = !bacmp(&c->dst, BDADDR_ANY);  			if ((src_match && dst_any) || (src_any && dst_match) ||  			    (src_any && dst_any))  				c1 = c; @@ -1342,14 +1454,17 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,  static void l2cap_le_conn_ready(struct l2cap_conn *conn)  { -	struct sock *parent; +	struct hci_conn *hcon = conn->hcon;  	struct l2cap_chan *chan, *pchan; +	u8 dst_type;  	BT_DBG(""); +	bt_6lowpan_add_conn(conn); +  	/* Check if we have socket listening on cid */  	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, -					  conn->src, conn->dst); +					  &hcon->src, &hcon->dst);  	if (!pchan)  		return; @@ -1357,23 +1472,27 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)  	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))  		return; -	parent = pchan->sk; +	dst_type = bdaddr_type(hcon, hcon->dst_type); -	lock_sock(parent); +	/* If device is blocked, do not create a channel for it */ +	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type)) +		return; + +	l2cap_chan_lock(pchan);  	chan = pchan->ops->new_connection(pchan);  	if (!chan)  		goto clean; -	chan->dcid = L2CAP_CID_ATT; - -	bacpy(&bt_sk(chan->sk)->src, conn->src); -	bacpy(&bt_sk(chan->sk)->dst, conn->dst); +	bacpy(&chan->src, &hcon->src); +	bacpy(&chan->dst, &hcon->dst); +	chan->src_type = bdaddr_type(hcon, hcon->src_type); +	chan->dst_type = dst_type;  	__l2cap_chan_add(conn, chan);  clean: -	release_sock(parent); +	
l2cap_chan_unlock(pchan);  }  static void l2cap_conn_ready(struct l2cap_conn *conn) @@ -1398,22 +1517,15 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)  		l2cap_chan_lock(chan); -		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { +		if (chan->scid == L2CAP_CID_A2MP) {  			l2cap_chan_unlock(chan);  			continue;  		}  		if (hcon->type == LE_LINK) { -			if (smp_conn_security(hcon, chan->sec_level)) -				l2cap_chan_ready(chan); - +			l2cap_le_start(chan);  		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { -			struct sock *sk = chan->sk; -			__clear_chan_timer(chan); -			lock_sock(sk); -			__l2cap_state_change(chan, BT_CONNECTED); -			sk->sk_state_change(sk); -			release_sock(sk); +			l2cap_chan_ready(chan);  		} else if (chan->state == BT_CONNECT) {  			l2cap_do_start(chan); @@ -1423,6 +1535,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)  	}  	mutex_unlock(&conn->chan_lock); + +	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);  }  /* Notify sockets that we cannot guaranty reliability anymore */ @@ -1548,6 +1662,15 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)  	kfree_skb(conn->rx_skb); +	skb_queue_purge(&conn->pending_rx); + +	/* We can not call flush_work(&conn->pending_rx_work) here since we +	 * might block if we are running on a worker from the same workqueue +	 * pending_rx_work is waiting on. +	 */ +	if (work_pending(&conn->pending_rx_work)) +		cancel_work_sync(&conn->pending_rx_work); +  	l2cap_unregister_all_users(conn);  	mutex_lock(&conn->chan_lock); @@ -1595,65 +1718,6 @@ static void security_timeout(struct work_struct *work)  	}  } -static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) -{ -	struct l2cap_conn *conn = hcon->l2cap_data; -	struct hci_chan *hchan; - -	if (conn) -		return conn; - -	hchan = hci_chan_create(hcon); -	if (!hchan) -		return NULL; - -	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL); -	if (!conn) { -		hci_chan_del(hchan); -		return NULL; -	} - -	kref_init(&conn->ref); -	hcon->l2cap_data = conn; -	conn->hcon = hcon; -	hci_conn_get(conn->hcon); -	conn->hchan = hchan; - -	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); - -	switch (hcon->type) { -	case LE_LINK: -		if (hcon->hdev->le_mtu) { -			conn->mtu = hcon->hdev->le_mtu; -			break; -		} -		/* fall through */ -	default: -		conn->mtu = hcon->hdev->acl_mtu; -		break; -	} - -	conn->src = &hcon->hdev->bdaddr; -	conn->dst = &hcon->dst; - -	conn->feat_mask = 0; - -	spin_lock_init(&conn->lock); -	mutex_init(&conn->chan_lock); - -	INIT_LIST_HEAD(&conn->chan_l); -	INIT_LIST_HEAD(&conn->users); - -	if (hcon->type == LE_LINK) -		INIT_DELAYED_WORK(&conn->security_timer, security_timeout); -	else -		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); - -	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; - -	return conn; -} -  static void l2cap_conn_free(struct kref *ref)  {  	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); @@ -1681,33 +1745,38 @@ EXPORT_SYMBOL(l2cap_conn_put);   */  static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,  						   bdaddr_t *src, -						   bdaddr_t *dst) +						   bdaddr_t *dst, +						   u8 link_type)  {  	struct l2cap_chan *c, *c1 = NULL;  	read_lock(&chan_list_lock);  	list_for_each_entry(c, &chan_list, global_l) { -		struct sock *sk = c->sk; -  		if (state && c->state != state)  			continue; +		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) +			continue; + +		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) +			continue; +  		if (c->psm == psm) { 
 			int src_match, dst_match;  			int src_any, dst_any;  			/* Exact match. */ -			src_match = !bacmp(&bt_sk(sk)->src, src); -			dst_match = !bacmp(&bt_sk(sk)->dst, dst); +			src_match = !bacmp(&c->src, src); +			dst_match = !bacmp(&c->dst, dst);  			if (src_match && dst_match) {  				read_unlock(&chan_list_lock);  				return c;  			}  			/* Closest match */ -			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY); -			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY); +			src_any = !bacmp(&c->src, BDADDR_ANY); +			dst_any = !bacmp(&c->dst, BDADDR_ANY);  			if ((src_match && dst_any) || (src_any && dst_match) ||  			    (src_any && dst_any))  				c1 = c; @@ -1719,174 +1788,6 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,  	return c1;  } -int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, -		       bdaddr_t *dst, u8 dst_type) -{ -	struct sock *sk = chan->sk; -	bdaddr_t *src = &bt_sk(sk)->src; -	struct l2cap_conn *conn; -	struct hci_conn *hcon; -	struct hci_dev *hdev; -	__u8 auth_type; -	int err; - -	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst, -	       dst_type, __le16_to_cpu(psm)); - -	hdev = hci_get_route(dst, src); -	if (!hdev) -		return -EHOSTUNREACH; - -	hci_dev_lock(hdev); - -	l2cap_chan_lock(chan); - -	/* PSM must be odd and lsb of upper byte must be 0 */ -	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && -	    chan->chan_type != L2CAP_CHAN_RAW) { -		err = -EINVAL; -		goto done; -	} - -	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) { -		err = -EINVAL; -		goto done; -	} - -	switch (chan->mode) { -	case L2CAP_MODE_BASIC: -		break; -	case L2CAP_MODE_ERTM: -	case L2CAP_MODE_STREAMING: -		if (!disable_ertm) -			break; -		/* fall through */ -	default: -		err = -ENOTSUPP; -		goto done; -	} - -	switch (chan->state) { -	case BT_CONNECT: -	case BT_CONNECT2: -	case BT_CONFIG: -		/* Already connecting */ -		err = 0; -		goto done; - -	case BT_CONNECTED: -		/* Already connected */ -		err = -EISCONN; -		goto done; - -	case BT_OPEN: -	case BT_BOUND: -		/* Can connect */ -		break; - -	default: -		err = -EBADFD; -		goto done; -	} - -	/* Set destination address and psm */ -	lock_sock(sk); -	bacpy(&bt_sk(sk)->dst, dst); -	release_sock(sk); - -	chan->psm = psm; -	chan->dcid = cid; - -	auth_type = l2cap_get_auth_type(chan); - -	if (bdaddr_type_is_le(dst_type)) -		hcon = hci_connect(hdev, LE_LINK, dst, dst_type, -				   chan->sec_level, auth_type); -	else -		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type, -				   chan->sec_level, auth_type); - -	if (IS_ERR(hcon)) { -		err = PTR_ERR(hcon); -		goto done; -	} - -	conn = l2cap_conn_add(hcon); -	if (!conn) { -		hci_conn_drop(hcon); -		err = -ENOMEM; -		goto done; -	} - -	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { -		hci_conn_drop(hcon); -		err = -EBUSY; -		goto done; -	} - -	/* Update source addr of the socket */ -	bacpy(src, conn->src); - -	l2cap_chan_unlock(chan); -	l2cap_chan_add(conn, chan); -	l2cap_chan_lock(chan); - -	/* l2cap_chan_add takes its own ref so we can drop this one */ -	hci_conn_drop(hcon); - -	l2cap_state_change(chan, BT_CONNECT); -	__set_chan_timer(chan, sk->sk_sndtimeo); - -	if (hcon->state == BT_CONNECTED) { -		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { -			__clear_chan_timer(chan); -			if (l2cap_chan_check_security(chan)) -				l2cap_state_change(chan, BT_CONNECTED); -		} else -			l2cap_do_start(chan); -	} - -	err = 0; - -done: -	l2cap_chan_unlock(chan); -	hci_dev_unlock(hdev); -	hci_dev_put(hdev); -	return err; -} - -int __l2cap_wait_ack(struct sock 
*sk) -{ -	struct l2cap_chan *chan = l2cap_pi(sk)->chan; -	DECLARE_WAITQUEUE(wait, current); -	int err = 0; -	int timeo = HZ/5; - -	add_wait_queue(sk_sleep(sk), &wait); -	set_current_state(TASK_INTERRUPTIBLE); -	while (chan->unacked_frames > 0 && chan->conn) { -		if (!timeo) -			timeo = HZ/5; - -		if (signal_pending(current)) { -			err = sock_intr_errno(timeo); -			break; -		} - -		release_sock(sk); -		timeo = schedule_timeout(timeo); -		lock_sock(sk); -		set_current_state(TASK_INTERRUPTIBLE); - -		err = sock_error(sk); -		if (err) -			break; -	} -	set_current_state(TASK_RUNNING); -	remove_wait_queue(sk_sleep(sk), &wait); -	return err; -} -  static void l2cap_monitor_timeout(struct work_struct *work)  {  	struct l2cap_chan *chan = container_of(work, struct l2cap_chan, @@ -2263,7 +2164,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,  	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;  	struct l2cap_hdr *lh; -	BT_DBG("chan %p len %zu priority %u", chan, len, priority); +	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan, +	       __le16_to_cpu(chan->psm), len, priority);  	count = min_t(unsigned int, (conn->mtu - hlen), len); @@ -2278,7 +2180,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,  	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);  	lh->cid = cpu_to_le16(chan->dcid);  	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE); -	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE)); +	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));  	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);  	if (unlikely(err < 0)) { @@ -2445,6 +2347,89 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,  	return 0;  } +static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan, +						   struct msghdr *msg, +						   size_t len, u16 sdulen) +{ +	struct l2cap_conn *conn = chan->conn; +	struct sk_buff *skb; +	int err, count, hlen; +	struct l2cap_hdr *lh; + +	BT_DBG("chan %p len %zu", chan, len); + +	if (!conn) +		return ERR_PTR(-ENOTCONN); + +	hlen = L2CAP_HDR_SIZE; + +	if (sdulen) +		hlen += L2CAP_SDULEN_SIZE; + +	count = min_t(unsigned int, (conn->mtu - hlen), len); + +	skb = chan->ops->alloc_skb(chan, count + hlen, +				   msg->msg_flags & MSG_DONTWAIT); +	if (IS_ERR(skb)) +		return skb; + +	/* Create L2CAP header */ +	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); +	lh->cid = cpu_to_le16(chan->dcid); +	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + +	if (sdulen) +		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); + +	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); +	if (unlikely(err < 0)) { +		kfree_skb(skb); +		return ERR_PTR(err); +	} + +	return skb; +} + +static int l2cap_segment_le_sdu(struct l2cap_chan *chan, +				struct sk_buff_head *seg_queue, +				struct msghdr *msg, size_t len) +{ +	struct sk_buff *skb; +	size_t pdu_len; +	u16 sdu_len; + +	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); + +	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE; + +	pdu_len = min_t(size_t, pdu_len, chan->remote_mps); + +	sdu_len = len; +	pdu_len -= L2CAP_SDULEN_SIZE; + +	while (len > 0) { +		if (len <= pdu_len) +			pdu_len = len; + +		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len); +		if (IS_ERR(skb)) { +			__skb_queue_purge(seg_queue); +			return PTR_ERR(skb); +		} + +		__skb_queue_tail(seg_queue, skb); + +		len -= pdu_len; + +		if (sdu_len) { +			sdu_len = 0; +			pdu_len += L2CAP_SDULEN_SIZE; +		} +	} + +	return 0; +} +  int 
l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,  		    u32 priority)  { @@ -2452,17 +2437,62 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,  	int err;  	struct sk_buff_head seg_queue; +	if (!chan->conn) +		return -ENOTCONN; +  	/* Connectionless channel */  	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {  		skb = l2cap_create_connless_pdu(chan, msg, len, priority);  		if (IS_ERR(skb))  			return PTR_ERR(skb); +		/* Channel lock is released before requesting new skb and then +		 * reacquired thus we need to recheck channel state. +		 */ +		if (chan->state != BT_CONNECTED) { +			kfree_skb(skb); +			return -ENOTCONN; +		} +  		l2cap_do_send(chan, skb);  		return len;  	}  	switch (chan->mode) { +	case L2CAP_MODE_LE_FLOWCTL: +		/* Check outgoing MTU */ +		if (len > chan->omtu) +			return -EMSGSIZE; + +		if (!chan->tx_credits) +			return -EAGAIN; + +		__skb_queue_head_init(&seg_queue); + +		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len); + +		if (chan->state != BT_CONNECTED) { +			__skb_queue_purge(&seg_queue); +			err = -ENOTCONN; +		} + +		if (err) +			return err; + +		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); + +		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { +			l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); +			chan->tx_credits--; +		} + +		if (!chan->tx_credits) +			chan->ops->suspend(chan); + +		err = len; + +		break; +  	case L2CAP_MODE_BASIC:  		/* Check outgoing MTU */  		if (len > chan->omtu) @@ -2473,6 +2503,14 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,  		if (IS_ERR(skb))  			return PTR_ERR(skb); +		/* Channel lock is released before requesting new skb and then +		 * reacquired thus we need to recheck channel state. 
+		 */ +		if (chan->state != BT_CONNECTED) { +			kfree_skb(skb); +			return -ENOTCONN; +		} +  		l2cap_do_send(chan, skb);  		err = len;  		break; @@ -2826,17 +2864,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)  	mutex_lock(&conn->chan_lock);  	list_for_each_entry(chan, &conn->chan_l, list) { -		struct sock *sk = chan->sk;  		if (chan->chan_type != L2CAP_CHAN_RAW)  			continue; -		/* Don't send frame to the socket it came from */ -		if (skb->sk == sk) +		/* Don't send frame to the channel it came from */ +		if (bt_cb(skb)->chan == chan)  			continue; +  		nskb = skb_clone(skb, GFP_KERNEL);  		if (!nskb)  			continue; -  		if (chan->ops->recv(chan, nskb))  			kfree_skb(nskb);  	} @@ -2870,9 +2907,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,  	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);  	if (conn->hcon->type == LE_LINK) -		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING); +		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);  	else -		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING); +		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);  	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);  	cmd->code  = code; @@ -2985,8 +3022,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)  		efs.stype	= chan->local_stype;  		efs.msdu	= cpu_to_le16(chan->local_msdu);  		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime); -		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); -		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); +		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); +		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);  		break;  	case L2CAP_MODE_STREAMING: @@ -3043,8 +3080,8 @@ int l2cap_ertm_init(struct l2cap_chan *chan)  	skb_queue_head_init(&chan->tx_q); -	chan->local_amp_id = 0; -	chan->move_id = 0; +	chan->local_amp_id = AMP_ID_BREDR; +	chan->move_id = AMP_ID_BREDR;  	chan->move_state = L2CAP_MOVE_STABLE;  	chan->move_role = L2CAP_MOVE_ROLE_NONE; @@ -3084,20 +3121,20 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)  	}  } -static inline bool __l2cap_ews_supported(struct l2cap_chan *chan) +static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)  { -	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW; +	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;  } -static inline bool __l2cap_efs_supported(struct l2cap_chan *chan) +static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)  { -	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; +	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;  }  static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,  				      struct l2cap_conf_rfc *rfc)  { -	if (chan->local_amp_id && chan->hs_hcon) { +	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {  		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;  		/* Class 1 devices have must have ERTM timeouts @@ -3127,15 +3164,15 @@ static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,  		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);  		rfc->monitor_timeout = rfc->retrans_timeout;  	} else { -		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); -		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); +		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); +		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);  	}  }  static inline void l2cap_txwin_setup(struct l2cap_chan *chan)  {  	
if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && -	    __l2cap_ews_supported(chan)) { +	    __l2cap_ews_supported(chan->conn)) {  		/* use extended control field */  		set_bit(FLAG_EXT_CTRL, &chan->flags);  		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; @@ -3165,7 +3202,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)  		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))  			break; -		if (__l2cap_efs_supported(chan)) +		if (__l2cap_efs_supported(chan->conn))  			set_bit(FLAG_EFS_ENABLE, &chan->flags);  		/* fall through */ @@ -3260,7 +3297,7 @@ done:  	}  	req->dcid  = cpu_to_le16(chan->dcid); -	req->flags = __constant_cpu_to_le16(0); +	req->flags = cpu_to_le16(0);  	return ptr - data;  } @@ -3317,7 +3354,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)  			break;  		case L2CAP_CONF_EWS: -			if (!enable_hs) +			if (!chan->conn->hs_enabled)  				return -ECONNREFUSED;  			set_bit(FLAG_EXT_CTRL, &chan->flags); @@ -3349,7 +3386,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)  		}  		if (remote_efs) { -			if (__l2cap_efs_supported(chan)) +			if (__l2cap_efs_supported(chan->conn))  				set_bit(FLAG_EFS_ENABLE, &chan->flags);  			else  				return -ECONNREFUSED; @@ -3474,7 +3511,7 @@ done:  	}  	rsp->scid   = cpu_to_le16(chan->dcid);  	rsp->result = cpu_to_le16(result); -	rsp->flags  = __constant_cpu_to_le16(0); +	rsp->flags  = cpu_to_le16(0);  	return ptr - data;  } @@ -3583,7 +3620,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,  	}  	req->dcid   = cpu_to_le16(chan->dcid); -	req->flags  = __constant_cpu_to_le16(0); +	req->flags  = cpu_to_le16(0);  	return ptr - data;  } @@ -3603,6 +3640,23 @@ static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,  	return ptr - data;  } +void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) +{ +	struct l2cap_le_conn_rsp rsp; +	struct l2cap_conn *conn = chan->conn; + +	BT_DBG("chan %p", chan); + +	rsp.dcid    = cpu_to_le16(chan->scid); +	rsp.mtu     = cpu_to_le16(chan->imtu); +	rsp.mps     = cpu_to_le16(chan->mps); +	rsp.credits = cpu_to_le16(chan->rx_credits); +	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS); + +	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), +		       &rsp); +} +  void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)  {  	struct l2cap_conn_rsp rsp; @@ -3612,8 +3666,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)  	rsp.scid   = cpu_to_le16(chan->dcid);  	rsp.dcid   = cpu_to_le16(chan->scid); -	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); -	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); +	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); +	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);  	if (chan->hs_hcon)  		rsp_code = L2CAP_CREATE_CHAN_RSP; @@ -3642,8 +3696,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)  	u16 txwin_ext = chan->ack_win;  	struct l2cap_conf_rfc rfc = {  		.mode = chan->mode, -		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO), -		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO), +		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO), +		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),  		.max_pdu_size = cpu_to_le16(chan->imtu),  		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),  	}; @@ -3715,7 +3769,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,  	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;  	struct l2cap_conn_rsp rsp;  	
struct l2cap_chan *chan = NULL, *pchan; -	struct sock *parent, *sk = NULL;  	int result, status = L2CAP_CS_NO_INFO;  	u16 dcid = 0, scid = __le16_to_cpu(req->scid); @@ -3724,19 +3777,18 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,  	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);  	/* Check if we have socket listening on psm */ -	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst); +	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, +					 &conn->hcon->dst, ACL_LINK);  	if (!pchan) {  		result = L2CAP_CR_BAD_PSM;  		goto sendresp;  	} -	parent = pchan->sk; -  	mutex_lock(&conn->chan_lock); -	lock_sock(parent); +	l2cap_chan_lock(pchan);  	/* Check if the ACL is secure enough (if not SDP) */ -	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && +	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&  	    !hci_conn_check_link_mode(conn->hcon)) {  		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;  		result = L2CAP_CR_SEC_BLOCK; @@ -3753,8 +3805,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,  	if (!chan)  		goto response; -	sk = chan->sk; -  	/* For certain devices (ex: HID mouse), support for authentication,  	 * pairing and bonding is optional. For such devices, inorder to avoid  	 * the ACL alive for too long after L2CAP disconnection, reset the ACL @@ -3762,8 +3812,10 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,  	 */  	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; -	bacpy(&bt_sk(sk)->src, conn->src); -	bacpy(&bt_sk(sk)->dst, conn->dst); +	bacpy(&chan->src, &conn->hcon->src); +	bacpy(&chan->dst, &conn->hcon->dst); +	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type); +	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);  	chan->psm  = psm;  	chan->dcid = scid;  	chan->local_amp_id = amp_id; @@ -3772,14 +3824,14 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,  	dcid = chan->scid; -	__set_chan_timer(chan, sk->sk_sndtimeo); +	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));  	chan->ident = cmd->ident;  	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {  		if (l2cap_chan_check_security(chan)) { -			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { -				__l2cap_state_change(chan, BT_CONNECT2); +			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { +				l2cap_state_change(chan, BT_CONNECT2);  				result = L2CAP_CR_PEND;  				status = L2CAP_CS_AUTHOR_PEND;  				chan->ops->defer(chan); @@ -3788,28 +3840,28 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,  				 * The connection will succeed after the  				 * physical link is up.  				 
*/ -				if (amp_id) { -					__l2cap_state_change(chan, BT_CONNECT2); -					result = L2CAP_CR_PEND; -				} else { -					__l2cap_state_change(chan, BT_CONFIG); +				if (amp_id == AMP_ID_BREDR) { +					l2cap_state_change(chan, BT_CONFIG);  					result = L2CAP_CR_SUCCESS; +				} else { +					l2cap_state_change(chan, BT_CONNECT2); +					result = L2CAP_CR_PEND;  				}  				status = L2CAP_CS_NO_INFO;  			}  		} else { -			__l2cap_state_change(chan, BT_CONNECT2); +			l2cap_state_change(chan, BT_CONNECT2);  			result = L2CAP_CR_PEND;  			status = L2CAP_CS_AUTHEN_PEND;  		}  	} else { -		__l2cap_state_change(chan, BT_CONNECT2); +		l2cap_state_change(chan, BT_CONNECT2);  		result = L2CAP_CR_PEND;  		status = L2CAP_CS_NO_INFO;  	}  response: -	release_sock(parent); +	l2cap_chan_unlock(pchan);  	mutex_unlock(&conn->chan_lock);  sendresp: @@ -3821,7 +3873,7 @@ sendresp:  	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {  		struct l2cap_info_req info; -		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); +		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);  		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;  		conn->info_ident = l2cap_get_ident(conn); @@ -3891,13 +3943,13 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,  	if (scid) {  		chan = __l2cap_get_chan_by_scid(conn, scid);  		if (!chan) { -			err = -EFAULT; +			err = -EBADSLT;  			goto unlock;  		}  	} else {  		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);  		if (!chan) { -			err = -EFAULT; +			err = -EBADSLT;  			goto unlock;  		}  	} @@ -3965,6 +4017,18 @@ static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,  					    L2CAP_CONF_SUCCESS, flags), data);  } +static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident, +				   u16 scid, u16 dcid) +{ +	struct l2cap_cmd_rej_cid rej; + +	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); +	rej.scid = __cpu_to_le16(scid); +	rej.dcid = __cpu_to_le16(dcid); + +	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); +} +  static inline int l2cap_config_req(struct l2cap_conn *conn,  				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,  				   u8 *data) @@ -3984,18 +4048,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,  	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);  	chan = l2cap_get_chan_by_scid(conn, dcid); -	if (!chan) -		return -ENOENT; +	if (!chan) { +		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0); +		return 0; +	}  	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { -		struct l2cap_cmd_rej_cid rej; - -		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID); -		rej.scid = cpu_to_le16(chan->scid); -		rej.dcid = cpu_to_le16(chan->dcid); - -		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, -			       sizeof(rej), &rej); +		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, +				       chan->dcid);  		goto unlock;  	} @@ -4198,7 +4258,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,  	struct l2cap_disconn_rsp rsp;  	u16 dcid, scid;  	struct l2cap_chan *chan; -	struct sock *sk;  	if (cmd_len != sizeof(*req))  		return -EPROTO; @@ -4213,20 +4272,17 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,  	chan = __l2cap_get_chan_by_scid(conn, dcid);  	if (!chan) {  		mutex_unlock(&conn->chan_lock); +		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);  		return 0;  	}  	l2cap_chan_lock(chan); -	sk = chan->sk; -  	rsp.dcid = cpu_to_le16(chan->scid);  	rsp.scid = cpu_to_le16(chan->dcid);  	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), 
&rsp); -	lock_sock(sk); -	sk->sk_shutdown = SHUTDOWN_MASK; -	release_sock(sk); +	chan->ops->set_shutdown(chan);  	l2cap_chan_hold(chan);  	l2cap_chan_del(chan, ECONNRESET); @@ -4298,12 +4354,12 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,  		u8 buf[8];  		u32 feat_mask = l2cap_feat_mask;  		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; -		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); -		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); +		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK); +		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);  		if (!disable_ertm)  			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING  				| L2CAP_FEAT_FCS; -		if (enable_hs) +		if (conn->hs_enabled)  			feat_mask |= L2CAP_FEAT_EXT_FLOW  				| L2CAP_FEAT_EXT_WINDOW; @@ -4314,20 +4370,20 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,  		u8 buf[12];  		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; -		if (enable_hs) +		if (conn->hs_enabled)  			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;  		else  			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; -		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); -		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); +		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN); +		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);  		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));  		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),  			       buf);  	} else {  		struct l2cap_info_rsp rsp;  		rsp.type   = cpu_to_le16(type); -		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); +		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);  		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),  			       &rsp);  	} @@ -4372,7 +4428,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,  		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {  			struct l2cap_info_req req; -			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); +			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);  			conn->info_ident = l2cap_get_ident(conn); @@ -4411,7 +4467,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,  	if (cmd_len != sizeof(*req))  		return -EPROTO; -	if (!enable_hs) +	if (!conn->hs_enabled)  		return -EINVAL;  	psm = le16_to_cpu(req->psm); @@ -4420,7 +4476,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,  	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);  	/* For controller id 0 make BR/EDR connection */ -	if (req->amp_id == HCI_BREDR_ID) { +	if (req->amp_id == AMP_ID_BREDR) {  		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,  			      req->amp_id);  		return 0; @@ -4442,10 +4498,13 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,  		struct amp_mgr *mgr = conn->hcon->amp_mgr;  		struct hci_conn *hs_hcon; -		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst); +		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, +						  &conn->hcon->dst);  		if (!hs_hcon) {  			hci_dev_put(hdev); -			return -EFAULT; +			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, +					       chan->dcid); +			return 0;  		}  		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon); @@ -4463,13 +4522,13 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,  error:  	rsp.dcid = 0;  	rsp.scid = cpu_to_le16(scid); -	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP); -	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); +	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP); +	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);  	
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,  		       sizeof(rsp), &rsp); -	return -EFAULT; +	return 0;  }  static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id) @@ -4528,7 +4587,7 @@ static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)  	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);  	cfm.icid = cpu_to_le16(icid); -	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED); +	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);  	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,  		       sizeof(cfm), &cfm); @@ -4655,7 +4714,7 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,  	if (chan->state != BT_CONNECTED) {  		/* Ignore logical link if channel is on BR/EDR */ -		if (chan->local_amp_id) +		if (chan->local_amp_id != AMP_ID_BREDR)  			l2cap_logical_finish_create(chan, hchan);  	} else {  		l2cap_logical_finish_move(chan, hchan); @@ -4666,7 +4725,7 @@ void l2cap_move_start(struct l2cap_chan *chan)  {  	BT_DBG("chan %p", chan); -	if (chan->local_amp_id == HCI_BREDR_ID) { +	if (chan->local_amp_id == AMP_ID_BREDR) {  		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)  			return;  		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; @@ -4711,19 +4770,19 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,  		if (result == L2CAP_CR_SUCCESS) {  			/* Send successful response */ -			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); -			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); +			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); +			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);  		} else {  			/* Send negative response */ -			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM); -			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); +			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM); +			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);  		}  		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,  			       sizeof(rsp), &rsp);  		if (result == L2CAP_CR_SUCCESS) { -			__l2cap_state_change(chan, BT_CONFIG); +			l2cap_state_change(chan, BT_CONFIG);  			set_bit(CONF_REQ_SENT, &chan->conf_state);  			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),  				       L2CAP_CONF_REQ, @@ -4838,13 +4897,13 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,  	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id); -	if (!enable_hs) +	if (!conn->hs_enabled)  		return -EINVAL;  	chan = l2cap_get_chan_by_dcid(conn, icid);  	if (!chan) {  		rsp.icid = cpu_to_le16(icid); -		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED); +		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);  		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,  			       sizeof(rsp), &rsp);  		return 0; @@ -4865,7 +4924,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,  		goto send_move_response;  	} -	if (req->dest_amp_id) { +	if (req->dest_amp_id != AMP_ID_BREDR) {  		struct hci_dev *hdev;  		hdev = hci_dev_get(req->dest_amp_id);  		if (!hdev || hdev->dev_type != HCI_AMP || @@ -4885,7 +4944,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,  	 */  	if ((__chan_is_moving(chan) ||  	     chan->move_role != L2CAP_MOVE_ROLE_NONE) && -	    bacmp(conn->src, conn->dst) > 0) { +	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {  		result = L2CAP_MR_COLLISION;  		goto send_move_response;  	} @@ -4895,7 +4954,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,  	chan->move_id = req->dest_amp_id;  	icid = chan->dcid; -	if 
(!req->dest_amp_id) { +	if (req->dest_amp_id == AMP_ID_BREDR) {  		/* Moving to BR/EDR */  		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {  			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; @@ -5087,7 +5146,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,  	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {  		if (result == L2CAP_MC_CONFIRMED) {  			chan->local_amp_id = chan->move_id; -			if (!chan->local_amp_id) +			if (chan->local_amp_id == AMP_ID_BREDR)  				__release_logical_link(chan);  		} else {  			chan->move_id = chan->local_amp_id; @@ -5127,7 +5186,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,  	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {  		chan->local_amp_id = chan->move_id; -		if (!chan->local_amp_id && chan->hs_hchan) +		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)  			__release_logical_link(chan);  		l2cap_move_done(chan); @@ -5161,18 +5220,17 @@ static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,  static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,  					      struct l2cap_cmd_hdr *cmd, -					      u8 *data) +					      u16 cmd_len, u8 *data)  {  	struct hci_conn *hcon = conn->hcon;  	struct l2cap_conn_param_update_req *req;  	struct l2cap_conn_param_update_rsp rsp; -	u16 min, max, latency, to_multiplier, cmd_len; +	u16 min, max, latency, to_multiplier;  	int err;  	if (!(hcon->link_mode & HCI_LM_MASTER))  		return -EINVAL; -	cmd_len = __le16_to_cpu(cmd->len);  	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))  		return -EPROTO; @@ -5189,9 +5247,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,  	err = l2cap_check_conn_param(min, max, latency, to_multiplier);  	if (err) -		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); +		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);  	else -		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); +		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);  	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,  		       sizeof(rsp), &rsp); @@ -5202,6 +5260,65 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,  	return 0;  } +static int l2cap_le_connect_rsp(struct l2cap_conn *conn, +				struct l2cap_cmd_hdr *cmd, u16 cmd_len, +				u8 *data) +{ +	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; +	u16 dcid, mtu, mps, credits, result; +	struct l2cap_chan *chan; +	int err; + +	if (cmd_len < sizeof(*rsp)) +		return -EPROTO; + +	dcid    = __le16_to_cpu(rsp->dcid); +	mtu     = __le16_to_cpu(rsp->mtu); +	mps     = __le16_to_cpu(rsp->mps); +	credits = __le16_to_cpu(rsp->credits); +	result  = __le16_to_cpu(rsp->result); + +	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23)) +		return -EPROTO; + +	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", +	       dcid, mtu, mps, credits, result); + +	mutex_lock(&conn->chan_lock); + +	chan = __l2cap_get_chan_by_ident(conn, cmd->ident); +	if (!chan) { +		err = -EBADSLT; +		goto unlock; +	} + +	err = 0; + +	l2cap_chan_lock(chan); + +	switch (result) { +	case L2CAP_CR_SUCCESS: +		chan->ident = 0; +		chan->dcid = dcid; +		chan->omtu = mtu; +		chan->remote_mps = mps; +		chan->tx_credits = credits; +		l2cap_chan_ready(chan); +		break; + +	default: +		l2cap_chan_del(chan, ECONNREFUSED); +		break; +	} + +	l2cap_chan_unlock(chan); + +unlock: +	mutex_unlock(&conn->chan_lock); + +	return err; +} +  static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  				      
struct l2cap_cmd_hdr *cmd, u16 cmd_len,  				      u8 *data) @@ -5219,7 +5336,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  	case L2CAP_CONN_RSP:  	case L2CAP_CREATE_CHAN_RSP: -		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data); +		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);  		break;  	case L2CAP_CONF_REQ: @@ -5227,7 +5344,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  		break;  	case L2CAP_CONF_RSP: -		err = l2cap_config_rsp(conn, cmd, cmd_len, data); +		l2cap_config_rsp(conn, cmd, cmd_len, data);  		break;  	case L2CAP_DISCONN_REQ: @@ -5235,7 +5352,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  		break;  	case L2CAP_DISCONN_RSP: -		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data); +		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);  		break;  	case L2CAP_ECHO_REQ: @@ -5250,7 +5367,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  		break;  	case L2CAP_INFO_RSP: -		err = l2cap_information_rsp(conn, cmd, cmd_len, data); +		l2cap_information_rsp(conn, cmd, cmd_len, data);  		break;  	case L2CAP_CREATE_CHAN_REQ: @@ -5262,7 +5379,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  		break;  	case L2CAP_MOVE_CHAN_RSP: -		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data); +		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);  		break;  	case L2CAP_MOVE_CHAN_CFM: @@ -5270,7 +5387,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  		break;  	case L2CAP_MOVE_CHAN_CFM_RSP: -		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); +		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);  		break;  	default: @@ -5282,73 +5399,282 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,  	return err;  } +static int l2cap_le_connect_req(struct l2cap_conn *conn, +				struct l2cap_cmd_hdr *cmd, u16 cmd_len, +				u8 *data) +{ +	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; +	struct l2cap_le_conn_rsp rsp; +	struct l2cap_chan *chan, *pchan; +	u16 dcid, scid, credits, mtu, mps; +	__le16 psm; +	u8 result; + +	if (cmd_len != sizeof(*req)) +		return -EPROTO; + +	scid = __le16_to_cpu(req->scid); +	mtu  = __le16_to_cpu(req->mtu); +	mps  = __le16_to_cpu(req->mps); +	psm  = req->psm; +	dcid = 0; +	credits = 0; + +	if (mtu < 23 || mps < 23) +		return -EPROTO; + +	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), +	       scid, mtu, mps); + +	/* Check if we have socket listening on psm */ +	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, +					 &conn->hcon->dst, LE_LINK); +	if (!pchan) { +		result = L2CAP_CR_BAD_PSM; +		chan = NULL; +		goto response; +	} + +	mutex_lock(&conn->chan_lock); +	l2cap_chan_lock(pchan); + +	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) { +		result = L2CAP_CR_AUTHENTICATION; +		chan = NULL; +		goto response_unlock; +	} + +	/* Check if we already have channel with that dcid */ +	if (__l2cap_get_chan_by_dcid(conn, scid)) { +		result = L2CAP_CR_NO_MEM; +		chan = NULL; +		goto response_unlock; +	} + +	chan = pchan->ops->new_connection(pchan); +	if (!chan) { +		result = L2CAP_CR_NO_MEM; +		goto response_unlock; +	} + +	l2cap_le_flowctl_init(chan); + +	bacpy(&chan->src, &conn->hcon->src); +	bacpy(&chan->dst, &conn->hcon->dst); +	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type); +	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type); +	chan->psm  = psm; +	chan->dcid = scid; +	chan->omtu = mtu; +	chan->remote_mps = mps; +	chan->tx_credits = 
__le16_to_cpu(req->credits); + +	__l2cap_chan_add(conn, chan); +	dcid = chan->scid; +	credits = chan->rx_credits; + +	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + +	chan->ident = cmd->ident; + +	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { +		l2cap_state_change(chan, BT_CONNECT2); +		result = L2CAP_CR_PEND; +		chan->ops->defer(chan); +	} else { +		l2cap_chan_ready(chan); +		result = L2CAP_CR_SUCCESS; +	} + +response_unlock: +	l2cap_chan_unlock(pchan); +	mutex_unlock(&conn->chan_lock); + +	if (result == L2CAP_CR_PEND) +		return 0; + +response: +	if (chan) { +		rsp.mtu = cpu_to_le16(chan->imtu); +		rsp.mps = cpu_to_le16(chan->mps); +	} else { +		rsp.mtu = 0; +		rsp.mps = 0; +	} + +	rsp.dcid    = cpu_to_le16(dcid); +	rsp.credits = cpu_to_le16(credits); +	rsp.result  = cpu_to_le16(result); + +	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); + +	return 0; +} + +static inline int l2cap_le_credits(struct l2cap_conn *conn, +				   struct l2cap_cmd_hdr *cmd, u16 cmd_len, +				   u8 *data) +{ +	struct l2cap_le_credits *pkt; +	struct l2cap_chan *chan; +	u16 cid, credits, max_credits; + +	if (cmd_len != sizeof(*pkt)) +		return -EPROTO; + +	pkt = (struct l2cap_le_credits *) data; +	cid	= __le16_to_cpu(pkt->cid); +	credits	= __le16_to_cpu(pkt->credits); + +	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits); + +	chan = l2cap_get_chan_by_dcid(conn, cid); +	if (!chan) +		return -EBADSLT; + +	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits; +	if (credits > max_credits) { +		BT_ERR("LE credits overflow"); +		l2cap_send_disconn_req(chan, ECONNRESET); + +		/* Return 0 so that we don't trigger an unnecessary +		 * command reject packet. +		 */ +		return 0; +	} + +	chan->tx_credits += credits; + +	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { +		l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); +		chan->tx_credits--; +	} + +	if (chan->tx_credits) +		chan->ops->resume(chan); + +	l2cap_chan_unlock(chan); + +	return 0; +} + +static inline int l2cap_le_command_rej(struct l2cap_conn *conn, +				       struct l2cap_cmd_hdr *cmd, u16 cmd_len, +				       u8 *data) +{ +	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; +	struct l2cap_chan *chan; + +	if (cmd_len < sizeof(*rej)) +		return -EPROTO; + +	mutex_lock(&conn->chan_lock); + +	chan = __l2cap_get_chan_by_ident(conn, cmd->ident); +	if (!chan) +		goto done; + +	l2cap_chan_lock(chan); +	l2cap_chan_del(chan, ECONNREFUSED); +	l2cap_chan_unlock(chan); + +done: +	mutex_unlock(&conn->chan_lock); +	return 0; +} +  static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, -				   struct l2cap_cmd_hdr *cmd, u8 *data) +				   struct l2cap_cmd_hdr *cmd, u16 cmd_len, +				   u8 *data)  { +	int err = 0; +  	switch (cmd->code) {  	case L2CAP_COMMAND_REJ: -		return 0; +		l2cap_le_command_rej(conn, cmd, cmd_len, data); +		break;  	case L2CAP_CONN_PARAM_UPDATE_REQ: -		return l2cap_conn_param_update_req(conn, cmd, data); +		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data); +		break;  	case L2CAP_CONN_PARAM_UPDATE_RSP: -		return 0; +		break; + +	case L2CAP_LE_CONN_RSP: +		l2cap_le_connect_rsp(conn, cmd, cmd_len, data); +		break; + +	case L2CAP_LE_CONN_REQ: +		err = l2cap_le_connect_req(conn, cmd, cmd_len, data); +		break; + +	case L2CAP_LE_CREDITS: +		err = l2cap_le_credits(conn, cmd, cmd_len, data); +		break; + +	case L2CAP_DISCONN_REQ: +		err = l2cap_disconnect_req(conn, cmd, cmd_len, data); +		break; + +	case L2CAP_DISCONN_RSP: +		l2cap_disconnect_rsp(conn, cmd, cmd_len, data); +		break;  	
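	/* The LE-specific handlers above implement credit-based flow
	 * control for LE connection-oriented channels:
	 * l2cap_le_connect_req() rejects an MTU or MPS below 23, seeds
	 * chan->tx_credits from the peer's initial credit count and
	 * advertises our own rx_credits in the response, while
	 * l2cap_le_credits() refuses any increment that would push
	 * tx_credits past LE_FLOWCTL_MAX_CREDITS (tearing the channel
	 * down instead) and then drains tx_q, one credit per PDU.  For
	 * example, with tx_credits == 3 and four PDUs queued, three go
	 * out immediately and the fourth waits for the next
	 * L2CAP_LE_CREDITS packet from the remote.
	 */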
default:  		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code); -		return -EINVAL; +		err = -EINVAL; +		break;  	} + +	return err;  }  static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,  					struct sk_buff *skb)  { -	u8 *data = skb->data; -	int len = skb->len; -	struct l2cap_cmd_hdr cmd; +	struct hci_conn *hcon = conn->hcon; +	struct l2cap_cmd_hdr *cmd; +	u16 len;  	int err; -	l2cap_raw_recv(conn, skb); +	if (hcon->type != LE_LINK) +		goto drop; -	while (len >= L2CAP_CMD_HDR_SIZE) { -		u16 cmd_len; -		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE); -		data += L2CAP_CMD_HDR_SIZE; -		len  -= L2CAP_CMD_HDR_SIZE; +	if (skb->len < L2CAP_CMD_HDR_SIZE) +		goto drop; -		cmd_len = le16_to_cpu(cmd.len); +	cmd = (void *) skb->data; +	skb_pull(skb, L2CAP_CMD_HDR_SIZE); -		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, -		       cmd.ident); +	len = le16_to_cpu(cmd->len); -		if (cmd_len > len || !cmd.ident) { -			BT_DBG("corrupted command"); -			break; -		} +	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); -		err = l2cap_le_sig_cmd(conn, &cmd, data); -		if (err) { -			struct l2cap_cmd_rej_unk rej; +	if (len != skb->len || !cmd->ident) { +		BT_DBG("corrupted command"); +		goto drop; +	} -			BT_ERR("Wrong link type (%d)", err); +	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data); +	if (err) { +		struct l2cap_cmd_rej_unk rej; -			/* FIXME: Map err to a valid reason */ -			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); -			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, -				       sizeof(rej), &rej); -		} +		BT_ERR("Wrong link type (%d)", err); -		data += cmd_len; -		len  -= cmd_len; +		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); +		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, +			       sizeof(rej), &rej);  	} +drop:  	kfree_skb(skb);  }  static inline void l2cap_sig_channel(struct l2cap_conn *conn,  				     struct sk_buff *skb)  { +	struct hci_conn *hcon = conn->hcon;  	u8 *data = skb->data;  	int len = skb->len;  	struct l2cap_cmd_hdr cmd; @@ -5356,6 +5682,9 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,  	l2cap_raw_recv(conn, skb); +	if (hcon->type != ACL_LINK) +		goto drop; +  	while (len >= L2CAP_CMD_HDR_SIZE) {  		u16 cmd_len;  		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE); @@ -5378,8 +5707,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,  			BT_ERR("Wrong link type (%d)", err); -			/* FIXME: Map err to a valid reason */ -			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); +			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);  			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,  				       sizeof(rej), &rej);  		} @@ -5388,6 +5716,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,  		len  -= cmd_len;  	} +drop:  	kfree_skb(skb);  } @@ -5784,7 +6113,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,  			       struct sk_buff *skb, u8 event)  {  	int err = 0; -	bool skb_in_use = 0; +	bool skb_in_use = false;  	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,  	       event); @@ -5805,7 +6134,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,  							   control->txseq);  			chan->buffer_seq = chan->expected_tx_seq; -			skb_in_use = 1; +			skb_in_use = true;  			err = l2cap_reassemble_sdu(chan, skb, control);  			if (err) @@ -5841,7 +6170,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,  			 * current frame is stored for later use.  			 
*/  			skb_queue_tail(&chan->srej_q, skb); -			skb_in_use = 1; +			skb_in_use = true;  			BT_DBG("Queued %p (queue len %d)", skb,  			       skb_queue_len(&chan->srej_q)); @@ -5919,7 +6248,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,  {  	int err = 0;  	u16 txseq = control->txseq; -	bool skb_in_use = 0; +	bool skb_in_use = false;  	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,  	       event); @@ -5931,7 +6260,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,  			/* Keep frame for reassembly later */  			l2cap_pass_to_tx(chan, control);  			skb_queue_tail(&chan->srej_q, skb); -			skb_in_use = 1; +			skb_in_use = true;  			BT_DBG("Queued %p (queue len %d)", skb,  			       skb_queue_len(&chan->srej_q)); @@ -5942,7 +6271,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,  			l2cap_pass_to_tx(chan, control);  			skb_queue_tail(&chan->srej_q, skb); -			skb_in_use = 1; +			skb_in_use = true;  			BT_DBG("Queued %p (queue len %d)", skb,  			       skb_queue_len(&chan->srej_q)); @@ -5957,7 +6286,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,  			 * the missing frames.  			 */  			skb_queue_tail(&chan->srej_q, skb); -			skb_in_use = 1; +			skb_in_use = true;  			BT_DBG("Queued %p (queue len %d)", skb,  			       skb_queue_len(&chan->srej_q)); @@ -5971,7 +6300,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,  			 * SREJ'd frames.  			 */  			skb_queue_tail(&chan->srej_q, skb); -			skb_in_use = 1; +			skb_in_use = true;  			BT_DBG("Queued %p (queue len %d)", skb,  			       skb_queue_len(&chan->srej_q)); @@ -6318,6 +6647,122 @@ drop:  	return 0;  } +static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) +{ +	struct l2cap_conn *conn = chan->conn; +	struct l2cap_le_credits pkt; +	u16 return_credits; + +	/* We return more credits to the sender only after the amount of +	 * credits falls below half of the initial amount. +	 */ +	if (chan->rx_credits >= (le_max_credits + 1) / 2) +		return; + +	return_credits = le_max_credits - chan->rx_credits; + +	BT_DBG("chan %p returning %u credits to sender", chan, return_credits); + +	chan->rx_credits += return_credits; + +	pkt.cid     = cpu_to_le16(chan->scid); +	pkt.credits = cpu_to_le16(return_credits); + +	chan->ident = l2cap_get_ident(conn); + +	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); +} + +static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) +{ +	int err; + +	if (!chan->rx_credits) { +		BT_ERR("No credits to receive LE L2CAP data"); +		l2cap_send_disconn_req(chan, ECONNRESET); +		return -ENOBUFS; +	} + +	if (chan->imtu < skb->len) { +		BT_ERR("Too big LE L2CAP PDU"); +		return -ENOBUFS; +	} + +	chan->rx_credits--; +	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits); + +	l2cap_chan_le_send_credits(chan); + +	err = 0; + +	if (!chan->sdu) { +		u16 sdu_len; + +		sdu_len = get_unaligned_le16(skb->data); +		skb_pull(skb, L2CAP_SDULEN_SIZE); + +		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u", +		       sdu_len, skb->len, chan->imtu); + +		if (sdu_len > chan->imtu) { +			BT_ERR("Too big LE L2CAP SDU length received"); +			err = -EMSGSIZE; +			goto failed; +		} + +		if (skb->len > sdu_len) { +			BT_ERR("Too much LE L2CAP data received"); +			err = -EINVAL; +			goto failed; +		} + +		if (skb->len == sdu_len) +			return chan->ops->recv(chan, skb); + +		chan->sdu = skb; +		chan->sdu_len = sdu_len; +		chan->sdu_last_frag = skb; + +		return 0; +	} + +	BT_DBG("SDU fragment. 
chan->sdu->len %u skb->len %u chan->sdu_len %u", +	       chan->sdu->len, skb->len, chan->sdu_len); + +	if (chan->sdu->len + skb->len > chan->sdu_len) { +		BT_ERR("Too much LE L2CAP data received"); +		err = -EINVAL; +		goto failed; +	} + +	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); +	skb = NULL; + +	if (chan->sdu->len == chan->sdu_len) { +		err = chan->ops->recv(chan, chan->sdu); +		if (!err) { +			chan->sdu = NULL; +			chan->sdu_last_frag = NULL; +			chan->sdu_len = 0; +		} +	} + +failed: +	if (err) { +		kfree_skb(skb); +		kfree_skb(chan->sdu); +		chan->sdu = NULL; +		chan->sdu_last_frag = NULL; +		chan->sdu_len = 0; +	} + +	/* We can't return an error here since we took care of the skb +	 * freeing internally. An error return would cause the caller to +	 * do a double-free of the skb. +	 */ +	return 0; +} +  static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,  			       struct sk_buff *skb)  { @@ -6347,14 +6792,22 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,  		goto drop;  	switch (chan->mode) { +	case L2CAP_MODE_LE_FLOWCTL: +		if (l2cap_le_data_rcv(chan, skb) < 0) +			goto drop; + +		goto done; +  	case L2CAP_MODE_BASIC:  		/* If socket recv buffers overflows we drop data here  		 * which is *bad* because L2CAP has to be reliable.  		 * But we don't have any other choice. L2CAP doesn't  		 * provide flow control mechanism. */ -		if (chan->imtu < skb->len) +		if (chan->imtu < skb->len) { +			BT_ERR("Dropping L2CAP data: receive buffer overflow");  			goto drop; +		}  		if (!chan->ops->recv(chan, skb))  			goto done; @@ -6380,9 +6833,14 @@ done:  static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,  				  struct sk_buff *skb)  { +	struct hci_conn *hcon = conn->hcon;  	struct l2cap_chan *chan; -	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst); +	if (hcon->type != ACL_LINK) +		goto drop; + +	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst, +					ACL_LINK);  	if (!chan)  		goto drop; @@ -6394,6 +6852,10 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,  	if (chan->imtu < skb->len)  		goto drop; +	/* Store remote BD_ADDR and PSM for msg_name */ +	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst); +	bt_cb(skb)->psm = psm; +  	if (!chan->ops->recv(chan, skb))  		return; @@ -6404,15 +6866,22 @@ drop:  static void l2cap_att_channel(struct l2cap_conn *conn,  			      struct sk_buff *skb)  { +	struct hci_conn *hcon = conn->hcon;  	struct l2cap_chan *chan; +	if (hcon->type != LE_LINK) +		goto drop; +  	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT, -					 conn->src, conn->dst); +					 &hcon->src, &hcon->dst);  	if (!chan)  		goto drop;  	BT_DBG("chan %p, len %d", chan, skb->len); +	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type)) +		goto drop; +  	if (chan->imtu < skb->len)  		goto drop; @@ -6426,9 +6895,16 @@ drop:  static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)  {  	struct l2cap_hdr *lh = (void *) skb->data; +	struct hci_conn *hcon = conn->hcon;  	u16 cid, len;  	__le16 psm; +	if (hcon->state != BT_CONNECTED) { +		BT_DBG("queueing pending rx skb"); +		skb_queue_tail(&conn->pending_rx, skb); +		return; +	} +  	skb_pull(skb, L2CAP_HDR_SIZE);  	cid = __le16_to_cpu(lh->cid);  	len = __le16_to_cpu(lh->len); @@ -6441,9 +6917,6 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)  	BT_DBG("len %d, cid 0x%4.4x", len, cid);  	switch (cid) { -	case L2CAP_CID_LE_SIGNALING: -		l2cap_le_sig_channel(conn, skb); -		break; 
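	/* Frames that arrive while hcon->state is not yet BT_CONNECTED
	 * are queued on conn->pending_rx above and replayed by
	 * process_pending_rx() once the connection is fully
	 * established.  The LE signaling case is not gone; it is
	 * dispatched further down this switch, after the ATT channel.
	 */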
 	case L2CAP_CID_SIGNALING:  		l2cap_sig_channel(conn, skb);  		break; @@ -6458,17 +6931,266 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)  		l2cap_att_channel(conn, skb);  		break; +	case L2CAP_CID_LE_SIGNALING: +		l2cap_le_sig_channel(conn, skb); +		break; +  	case L2CAP_CID_SMP:  		if (smp_sig_channel(conn, skb))  			l2cap_conn_del(conn->hcon, EACCES);  		break; +	case L2CAP_FC_6LOWPAN: +		bt_6lowpan_recv(conn, skb); +		break; +  	default:  		l2cap_data_channel(conn, cid, skb);  		break;  	}  } +static void process_pending_rx(struct work_struct *work) +{ +	struct l2cap_conn *conn = container_of(work, struct l2cap_conn, +					       pending_rx_work); +	struct sk_buff *skb; + +	BT_DBG(""); + +	while ((skb = skb_dequeue(&conn->pending_rx))) +		l2cap_recv_frame(conn, skb); +} + +static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) +{ +	struct l2cap_conn *conn = hcon->l2cap_data; +	struct hci_chan *hchan; + +	if (conn) +		return conn; + +	hchan = hci_chan_create(hcon); +	if (!hchan) +		return NULL; + +	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL); +	if (!conn) { +		hci_chan_del(hchan); +		return NULL; +	} + +	kref_init(&conn->ref); +	hcon->l2cap_data = conn; +	conn->hcon = hcon; +	hci_conn_get(conn->hcon); +	conn->hchan = hchan; + +	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); + +	switch (hcon->type) { +	case LE_LINK: +		if (hcon->hdev->le_mtu) { +			conn->mtu = hcon->hdev->le_mtu; +			break; +		} +		/* fall through */ +	default: +		conn->mtu = hcon->hdev->acl_mtu; +		break; +	} + +	conn->feat_mask = 0; + +	if (hcon->type == ACL_LINK) +		conn->hs_enabled = test_bit(HCI_HS_ENABLED, +					    &hcon->hdev->dev_flags); + +	spin_lock_init(&conn->lock); +	mutex_init(&conn->chan_lock); + +	INIT_LIST_HEAD(&conn->chan_l); +	INIT_LIST_HEAD(&conn->users); + +	if (hcon->type == LE_LINK) +		INIT_DELAYED_WORK(&conn->security_timer, security_timeout); +	else +		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); + +	skb_queue_head_init(&conn->pending_rx); +	INIT_WORK(&conn->pending_rx_work, process_pending_rx); + +	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; + +	return conn; +} + +static bool is_valid_psm(u16 psm, u8 dst_type) { +	if (!psm) +		return false; + +	if (bdaddr_type_is_le(dst_type)) +		return (psm <= 0x00ff); + +	/* PSM must be odd and lsb of upper byte must be 0 */ +	return ((psm & 0x0101) == 0x0001); +} + +int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, +		       bdaddr_t *dst, u8 dst_type) +{ +	struct l2cap_conn *conn; +	struct hci_conn *hcon; +	struct hci_dev *hdev; +	__u8 auth_type; +	int err; + +	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, +	       dst_type, __le16_to_cpu(psm)); + +	hdev = hci_get_route(dst, &chan->src); +	if (!hdev) +		return -EHOSTUNREACH; + +	hci_dev_lock(hdev); + +	l2cap_chan_lock(chan); + +	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid && +	    chan->chan_type != L2CAP_CHAN_RAW) { +		err = -EINVAL; +		goto done; +	} + +	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) { +		err = -EINVAL; +		goto done; +	} + +	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) { +		err = -EINVAL; +		goto done; +	} + +	switch (chan->mode) { +	case L2CAP_MODE_BASIC: +		break; +	case L2CAP_MODE_LE_FLOWCTL: +		l2cap_le_flowctl_init(chan); +		break; +	case L2CAP_MODE_ERTM: +	case L2CAP_MODE_STREAMING: +		if (!disable_ertm) +			break; +		/* fall through */ +	default: +		err = -ENOTSUPP; +		goto done; +	} + +	switch (chan->state) { +	case BT_CONNECT: +	case 
BT_CONNECT2: +	case BT_CONFIG: +		/* Already connecting */ +		err = 0; +		goto done; + +	case BT_CONNECTED: +		/* Already connected */ +		err = -EISCONN; +		goto done; + +	case BT_OPEN: +	case BT_BOUND: +		/* Can connect */ +		break; + +	default: +		err = -EBADFD; +		goto done; +	} + +	/* Set destination address and psm */ +	bacpy(&chan->dst, dst); +	chan->dst_type = dst_type; + +	chan->psm = psm; +	chan->dcid = cid; + +	auth_type = l2cap_get_auth_type(chan); + +	if (bdaddr_type_is_le(dst_type)) { +		/* Convert from L2CAP channel address type to HCI address type +		 */ +		if (dst_type == BDADDR_LE_PUBLIC) +			dst_type = ADDR_LE_DEV_PUBLIC; +		else +			dst_type = ADDR_LE_DEV_RANDOM; + +		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level, +				      auth_type); +	} else { +		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); +	} + +	if (IS_ERR(hcon)) { +		err = PTR_ERR(hcon); +		goto done; +	} + +	conn = l2cap_conn_add(hcon); +	if (!conn) { +		hci_conn_drop(hcon); +		err = -ENOMEM; +		goto done; +	} + +	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { +		hci_conn_drop(hcon); +		err = -EBUSY; +		goto done; +	} + +	/* Update source addr of the socket */ +	bacpy(&chan->src, &hcon->src); +	chan->src_type = bdaddr_type(hcon, hcon->src_type); + +	l2cap_chan_unlock(chan); +	l2cap_chan_add(conn, chan); +	l2cap_chan_lock(chan); + +	/* l2cap_chan_add takes its own ref so we can drop this one */ +	hci_conn_drop(hcon); + +	l2cap_state_change(chan, BT_CONNECT); +	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + +	/* Release chan->sport so that it can be reused by other +	 * sockets (as it's only used for listening sockets). +	 */ +	write_lock(&chan_list_lock); +	chan->sport = 0; +	write_unlock(&chan_list_lock); + +	if (hcon->state == BT_CONNECTED) { +		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { +			__clear_chan_timer(chan); +			if (l2cap_chan_check_security(chan)) +				l2cap_state_change(chan, BT_CONNECTED); +		} else +			l2cap_do_start(chan); +	} + +	err = 0; + +done: +	l2cap_chan_unlock(chan); +	hci_dev_unlock(hdev); +	hci_dev_put(hdev); +	return err; +} +  /* ---- L2CAP interface with lower layer (HCI) ---- */  int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -6481,17 +7203,15 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)  	/* Find listening sockets and check their link_mode */  	read_lock(&chan_list_lock);  	list_for_each_entry(c, &chan_list, global_l) { -		struct sock *sk = c->sk; -  		if (c->state != BT_LISTEN)  			continue; -		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { +		if (!bacmp(&c->src, &hdev->bdaddr)) {  			lm1 |= HCI_LM_ACCEPT;  			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))  				lm1 |= HCI_LM_MASTER;  			exact++; -		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { +		} else if (!bacmp(&c->src, BDADDR_ANY)) {  			lm2 |= HCI_LM_ACCEPT;  			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))  				lm2 |= HCI_LM_MASTER; @@ -6532,6 +7252,8 @@ void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)  {  	BT_DBG("hcon %p reason %d", hcon, reason); +	bt_6lowpan_del_conn(hcon->l2cap_data); +  	l2cap_conn_del(hcon, bt_to_errno(reason));  } @@ -6543,7 +7265,8 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)  	if (encrypt == 0x00) {  		if (chan->sec_level == BT_SECURITY_MEDIUM) {  			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT); -		} else if (chan->sec_level == BT_SECURITY_HIGH) +		} else if (chan->sec_level == BT_SECURITY_HIGH || +			   chan->sec_level == BT_SECURITY_FIPS)  			l2cap_chan_close(chan, 
ECONNREFUSED);  	} else {  		if (chan->sec_level == BT_SECURITY_MEDIUM) @@ -6563,7 +7286,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)  	if (hcon->type == LE_LINK) {  		if (!status && encrypt) -			smp_distribute_keys(conn, 0); +			smp_distribute_keys(conn);  		cancel_delayed_work(&conn->security_timer);  	} @@ -6575,7 +7298,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)  		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,  		       state_to_string(chan->state)); -		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { +		if (chan->scid == L2CAP_CID_A2MP) {  			l2cap_chan_unlock(chan);  			continue;  		} @@ -6597,49 +7320,38 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)  		if (!status && (chan->state == BT_CONNECTED ||  				chan->state == BT_CONFIG)) { -			struct sock *sk = chan->sk; - -			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); -			sk->sk_state_change(sk); - +			chan->ops->resume(chan);  			l2cap_check_encryption(chan, encrypt);  			l2cap_chan_unlock(chan);  			continue;  		}  		if (chan->state == BT_CONNECT) { -			if (!status) { +			if (!status)  				l2cap_start_connection(chan); -			} else { +			else  				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT); -			}  		} else if (chan->state == BT_CONNECT2) { -			struct sock *sk = chan->sk;  			struct l2cap_conn_rsp rsp;  			__u16 res, stat; -			lock_sock(sk); -  			if (!status) { -				if (test_bit(BT_SK_DEFER_SETUP, -					     &bt_sk(sk)->flags)) { +				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {  					res = L2CAP_CR_PEND;  					stat = L2CAP_CS_AUTHOR_PEND;  					chan->ops->defer(chan);  				} else { -					__l2cap_state_change(chan, BT_CONFIG); +					l2cap_state_change(chan, BT_CONFIG);  					res = L2CAP_CR_SUCCESS;  					stat = L2CAP_CS_NO_INFO;  				}  			} else { -				__l2cap_state_change(chan, BT_DISCONN); +				l2cap_state_change(chan, BT_DISCONN);  				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);  				res = L2CAP_CR_SEC_BLOCK;  				stat = L2CAP_CS_NO_INFO;  			} -			release_sock(sk); -  			rsp.scid   = cpu_to_le16(chan->dcid);  			rsp.dcid   = cpu_to_le16(chan->scid);  			rsp.result = cpu_to_le16(res); @@ -6756,9 +7468,13 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)  		conn->rx_len -= skb->len;  		if (!conn->rx_len) { -			/* Complete frame received */ -			l2cap_recv_frame(conn, conn->rx_skb); +			/* Complete frame received. l2cap_recv_frame +			 * takes ownership of the skb so set the global +			 * rx_skb pointer to NULL first. 
+			 */ +			struct sk_buff *rx_skb = conn->rx_skb;  			conn->rx_skb = NULL; +			l2cap_recv_frame(conn, rx_skb);  		}  		break;  	} @@ -6775,10 +7491,8 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)  	read_lock(&chan_list_lock);  	list_for_each_entry(c, &chan_list, global_l) { -		struct sock *sk = c->sk; -  		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", -			   &bt_sk(sk)->src, &bt_sk(sk)->dst, +			   &c->src, &c->dst,  			   c->state, __le16_to_cpu(c->psm),  			   c->scid, c->dcid, c->imtu, c->omtu,  			   c->sec_level, c->mode); @@ -6811,18 +7525,25 @@ int __init l2cap_init(void)  	if (err < 0)  		return err; -	if (bt_debugfs) { -		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, -						    NULL, &l2cap_debugfs_fops); -		if (!l2cap_debugfs) -			BT_ERR("Failed to create L2CAP debug file"); -	} +	if (IS_ERR_OR_NULL(bt_debugfs)) +		return 0; + +	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, +					    NULL, &l2cap_debugfs_fops); + +	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs, +			   &le_max_credits); +	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs, +			   &le_default_mps); + +	bt_6lowpan_init();  	return 0;  }  void l2cap_exit(void)  { +	bt_6lowpan_cleanup();  	debugfs_remove(l2cap_debugfs);  	l2cap_cleanup_sockets();  } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 0098af80b21..e1378693cc9 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -27,12 +27,14 @@  /* Bluetooth L2CAP sockets. */ +#include <linux/module.h>  #include <linux/export.h>  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h>  #include <net/bluetooth/l2cap.h> -#include <net/bluetooth/smp.h> + +#include "smp.h"  static struct bt_sock_list l2cap_sk_list = {  	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) @@ -49,6 +51,32 @@ bool l2cap_is_socket(struct socket *sock)  }  EXPORT_SYMBOL(l2cap_is_socket); +static int l2cap_validate_bredr_psm(u16 psm) +{ +	/* PSM must be odd and lsb of upper byte must be 0 */ +	if ((psm & 0x0101) != 0x0001) +		return -EINVAL; + +	/* Restrict usage of well-known PSMs */ +	if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) +		return -EACCES; + +	return 0; +} + +static int l2cap_validate_le_psm(u16 psm) +{ +	/* Valid LE_PSM ranges are defined only until 0x00ff */ +	if (psm > 0x00ff) +		return -EINVAL; + +	/* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */ +	if (psm <= 0x007f && !capable(CAP_NET_BIND_SERVICE)) +		return -EACCES; + +	return 0; +} +  static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)  {  	struct sock *sk = sock->sk; @@ -68,6 +96,25 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)  	if (la.l2_cid && la.l2_psm)  		return -EINVAL; +	if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) +		return -EINVAL; + +	if (la.l2_cid) { +		/* When the socket gets created it defaults to +		 * CHAN_CONN_ORIENTED, so we need to overwrite the +		 * default here. 
+		 */ +		chan->chan_type = L2CAP_CHAN_FIXED; +		chan->omtu = L2CAP_DEFAULT_MTU; +	} + +	if (bdaddr_type_is_le(la.l2_bdaddr_type)) { +		/* We only allow ATT user space socket */ +		if (la.l2_cid && +		    la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) +			return -EINVAL; +	} +  	lock_sock(sk);  	if (sk->sk_state != BT_OPEN) { @@ -78,17 +125,13 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)  	if (la.l2_psm) {  		__u16 psm = __le16_to_cpu(la.l2_psm); -		/* PSM must be odd and lsb of upper byte must be 0 */ -		if ((psm & 0x0101) != 0x0001) { -			err = -EINVAL; -			goto done; -		} +		if (la.l2_bdaddr_type == BDADDR_BREDR) +			err = l2cap_validate_bredr_psm(psm); +		else +			err = l2cap_validate_le_psm(psm); -		/* Restrict usage of well-known PSMs */ -		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) { -			err = -EACCES; +		if (err)  			goto done; -		}  	}  	if (la.l2_cid) @@ -99,11 +142,26 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)  	if (err < 0)  		goto done; -	if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || -	    __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) +	switch (chan->chan_type) { +	case L2CAP_CHAN_CONN_LESS: +		if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP) +			chan->sec_level = BT_SECURITY_SDP; +		break; +	case L2CAP_CHAN_CONN_ORIENTED: +		if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || +		    __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) +			chan->sec_level = BT_SECURITY_SDP; +		break; +	case L2CAP_CHAN_RAW:  		chan->sec_level = BT_SECURITY_SDP; +		break; +	} + +	bacpy(&chan->src, &la.l2_bdaddr); +	chan->src_type = la.l2_bdaddr_type; -	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); +	if (chan->psm && bdaddr_type_is_le(chan->src_type)) +		chan->mode = L2CAP_MODE_LE_FLOWCTL;  	chan->state = BT_BOUND;  	sk->sk_state = BT_BOUND; @@ -134,6 +192,48 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,  	if (la.l2_cid && la.l2_psm)  		return -EINVAL; +	if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) +		return -EINVAL; + +	/* Check that the socket wasn't bound to something that +	 * conflicts with the address given to connect(). If chan->src +	 * is BDADDR_ANY it means bind() was never used, in which case +	 * chan->src_type and la.l2_bdaddr_type do not need to match. +	 */ +	if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) && +	    bdaddr_type_is_le(la.l2_bdaddr_type)) { +		/* Old user space versions will try to incorrectly bind +		 * the ATT socket using BDADDR_BREDR. We need to accept +		 * this and fix up the source address type only when +		 * both the source CID and destination CID indicate +		 * ATT. Anything else is an invalid combination. +		 */ +		if (chan->scid != L2CAP_CID_ATT || +		    la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) +			return -EINVAL; + +		/* We don't have the hdev available here to make a +		 * better decision on random vs public, but since all +		 * user space versions that exhibit this issue anyway do +		 * not support random local addresses assuming public +		 * here is good enough. 
+		 */ +		chan->src_type = BDADDR_LE_PUBLIC; +	} + +	if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR) +		return -EINVAL; + +	if (bdaddr_type_is_le(la.l2_bdaddr_type)) { +		/* We only allow ATT user space socket */ +		if (la.l2_cid && +		    la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) +			return -EINVAL; +	} + +	if (chan->psm && bdaddr_type_is_le(chan->src_type)) +		chan->mode = L2CAP_MODE_LE_FLOWCTL; +  	err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),  				 &la.l2_bdaddr, la.l2_bdaddr_type);  	if (err) @@ -171,6 +271,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)  	switch (chan->mode) {  	case L2CAP_MODE_BASIC: +	case L2CAP_MODE_LE_FLOWCTL:  		break;  	case L2CAP_MODE_ERTM:  	case L2CAP_MODE_STREAMING: @@ -259,18 +360,24 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,  	BT_DBG("sock %p, sk %p", sock, sk); +	if (peer && sk->sk_state != BT_CONNECTED && +	    sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) +		return -ENOTCONN; +  	memset(la, 0, sizeof(struct sockaddr_l2));  	addr->sa_family = AF_BLUETOOTH;  	*len = sizeof(struct sockaddr_l2); +	la->l2_psm = chan->psm; +  	if (peer) { -		la->l2_psm = chan->psm; -		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); +		bacpy(&la->l2_bdaddr, &chan->dst);  		la->l2_cid = cpu_to_le16(chan->dcid); +		la->l2_bdaddr_type = chan->dst_type;  	} else { -		la->l2_psm = chan->sport; -		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); +		bacpy(&la->l2_bdaddr, &chan->src);  		la->l2_cid = cpu_to_le16(chan->scid); +		la->l2_bdaddr_type = chan->src_type;  	}  	return 0; @@ -295,6 +402,16 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,  	switch (optname) {  	case L2CAP_OPTIONS: +		/* LE sockets should use BT_SNDMTU/BT_RCVMTU, but since +		 * legacy ATT code depends on getsockopt for +		 * L2CAP_OPTIONS we need to let this pass. 
+		 */ +		if (bdaddr_type_is_le(chan->src_type) && +		    chan->scid != L2CAP_CID_ATT) { +			err = -EINVAL; +			break; +		} +  		memset(&opts, 0, sizeof(opts));  		opts.imtu     = chan->imtu;  		opts.omtu     = chan->omtu; @@ -322,6 +439,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,  			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |  			      L2CAP_LM_SECURE;  			break; +		case BT_SECURITY_FIPS: +			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | +			      L2CAP_LM_SECURE | L2CAP_LM_FIPS; +			break;  		default:  			opt = 0;  			break; @@ -335,6 +456,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,  		if (put_user(opt, (u32 __user *) optval))  			err = -EFAULT; +  		break;  	case L2CAP_CONNINFO: @@ -389,6 +511,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,  	switch (optname) {  	case BT_SECURITY:  		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && +		    chan->chan_type != L2CAP_CHAN_FIXED &&  		    chan->chan_type != L2CAP_CHAN_RAW) {  			err = -EINVAL;  			break; @@ -445,12 +568,32 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,  		break;  	case BT_CHANNEL_POLICY: -		if (!enable_hs) { -			err = -ENOPROTOOPT; +		if (put_user(chan->chan_policy, (u32 __user *) optval)) +			err = -EFAULT; +		break; + +	case BT_SNDMTU: +		if (!bdaddr_type_is_le(chan->src_type)) { +			err = -EINVAL;  			break;  		} -		if (put_user(chan->chan_policy, (u32 __user *) optval)) +		if (sk->sk_state != BT_CONNECTED) { +			err = -ENOTCONN; +			break; +		} + +		if (put_user(chan->omtu, (u16 __user *) optval)) +			err = -EFAULT; +		break; + +	case BT_RCVMTU: +		if (!bdaddr_type_is_le(chan->src_type)) { +			err = -EINVAL; +			break; +		} + +		if (put_user(chan->imtu, (u16 __user *) optval))  			err = -EFAULT;  		break; @@ -494,6 +637,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,  	switch (optname) {  	case L2CAP_OPTIONS: +		if (bdaddr_type_is_le(chan->src_type)) { +			err = -EINVAL; +			break; +		} +  		if (sk->sk_state == BT_CONNECTED) {  			err = -EINVAL;  			break; @@ -525,6 +673,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,  		chan->mode = opts.mode;  		switch (chan->mode) { +		case L2CAP_MODE_LE_FLOWCTL: +			break;  		case L2CAP_MODE_BASIC:  			clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);  			break; @@ -552,6 +702,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,  			break;  		} +		if (opt & L2CAP_LM_FIPS) { +			err = -EINVAL; +			break; +		} +  		if (opt & L2CAP_LM_AUTH)  			chan->sec_level = BT_SECURITY_LOW;  		if (opt & L2CAP_LM_ENCRYPT) @@ -603,6 +758,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,  	switch (optname) {  	case BT_SECURITY:  		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && +		    chan->chan_type != L2CAP_CHAN_FIXED &&  		    chan->chan_type != L2CAP_CHAN_RAW) {  			err = -EINVAL;  			break; @@ -631,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,  		/*change security for LE channels */  		if (chan->scid == L2CAP_CID_ATT) { -			if (!conn->hcon->out) { -				err = -EINVAL; -				break; -			} -  			if (smp_conn_security(conn->hcon, sec.level))  				break;  			sk->sk_state = BT_CONFIG; @@ -665,10 +816,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,  			break;  		} -		if (opt) +		if (opt) {  			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); -		else +			set_bit(FLAG_DEFER_SETUP, 
&chan->flags); +		} else {  			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); +			clear_bit(FLAG_DEFER_SETUP, &chan->flags); +		}  		break;  	case BT_FLUSHABLE: @@ -683,7 +837,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,  		}  		if (opt == BT_FLUSHABLE_OFF) { -			struct l2cap_conn *conn = chan->conn; +			conn = chan->conn;  			/* proceed further only when we have l2cap_conn and  			   No Flush support in the LM */  			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { @@ -720,11 +874,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,  		break;  	case BT_CHANNEL_POLICY: -		if (!enable_hs) { -			err = -ENOPROTOOPT; -			break; -		} -  		if (get_user(opt, (u32 __user *) optval)) {  			err = -EFAULT;  			break; @@ -749,6 +898,37 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,  		break; +	case BT_SNDMTU: +		if (!bdaddr_type_is_le(chan->src_type)) { +			err = -EINVAL; +			break; +		} + +		/* Setting is not supported as it's the remote side that +		 * decides this. +		 */ +		err = -EPERM; +		break; + +	case BT_RCVMTU: +		if (!bdaddr_type_is_le(chan->src_type)) { +			err = -EINVAL; +			break; +		} + +		if (sk->sk_state == BT_CONNECTED) { +			err = -EISCONN; +			break; +		} + +		if (get_user(opt, (u32 __user *) optval)) { +			err = -EFAULT; +			break; +		} + +		chan->imtu = opt; +		break; +  	default:  		err = -ENOPROTOOPT;  		break; @@ -777,6 +957,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	if (sk->sk_state != BT_CONNECTED)  		return -ENOTCONN; +	lock_sock(sk); +	err = bt_sock_wait_ready(sk, msg->msg_flags); +	release_sock(sk); +	if (err) +		return err; +  	l2cap_chan_lock(chan);  	err = l2cap_chan_send(chan, msg, len, sk->sk_priority);  	l2cap_chan_unlock(chan); @@ -795,12 +981,18 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,  						    &bt_sk(sk)->flags)) { -		sk->sk_state = BT_CONFIG; -		pi->chan->state = BT_CONFIG; +		if (bdaddr_type_is_le(pi->chan->src_type)) { +			sk->sk_state = BT_CONNECTED; +			pi->chan->state = BT_CONNECTED; +			__l2cap_le_connect_rsp_defer(pi->chan); +		} else { +			sk->sk_state = BT_CONFIG; +			pi->chan->state = BT_CONFIG; +			__l2cap_connect_rsp_defer(pi->chan); +		} -		__l2cap_connect_rsp_defer(pi->chan); -		release_sock(sk); -		return 0; +		err = 0; +		goto done;  	}  	release_sock(sk); @@ -856,6 +1048,38 @@ static void l2cap_sock_kill(struct sock *sk)  	sock_put(sk);  } +static int __l2cap_wait_ack(struct sock *sk) +{ +	struct l2cap_chan *chan = l2cap_pi(sk)->chan; +	DECLARE_WAITQUEUE(wait, current); +	int err = 0; +	int timeo = HZ/5; + +	add_wait_queue(sk_sleep(sk), &wait); +	set_current_state(TASK_INTERRUPTIBLE); +	while (chan->unacked_frames > 0 && chan->conn) { +		if (!timeo) +			timeo = HZ/5; + +		if (signal_pending(current)) { +			err = sock_intr_errno(timeo); +			break; +		} + +		release_sock(sk); +		timeo = schedule_timeout(timeo); +		lock_sock(sk); +		set_current_state(TASK_INTERRUPTIBLE); + +		err = sock_error(sk); +		if (err) +			break; +	} +	set_current_state(TASK_RUNNING); +	remove_wait_queue(sk_sleep(sk), &wait); +	return err; +} +  static int l2cap_sock_shutdown(struct socket *sock, int how)  {  	struct sock *sk = sock->sk; @@ -946,16 +1170,21 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)  {  	struct sock *sk, *parent = chan->data; +	lock_sock(parent); +  	/* Check for backlog size 
*/  	if (sk_acceptq_is_full(parent)) {  		BT_DBG("backlog full %d", parent->sk_ack_backlog); +		release_sock(parent);  		return NULL;  	}  	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,  			      GFP_ATOMIC); -	if (!sk) +	if (!sk) { +		release_sock(parent);  		return NULL; +        }  	bt_sock_reclassify_lock(sk, BTPROTO_L2CAP); @@ -963,18 +1192,19 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)  	bt_accept_enqueue(parent, sk); +	release_sock(parent); +  	return l2cap_pi(sk)->chan;  }  static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)  { -	int err;  	struct sock *sk = chan->data; -	struct l2cap_pinfo *pi = l2cap_pi(sk); +	int err;  	lock_sock(sk); -	if (pi->rx_busy_skb) { +	if (l2cap_pi(sk)->rx_busy_skb) {  		err = -ENOMEM;  		goto done;  	} @@ -990,9 +1220,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)  	 * acked and reassembled until there is buffer space  	 * available.  	 */ -	if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) { -		pi->rx_busy_skb = skb; -		l2cap_chan_busy(pi->chan, 1); +	if (err < 0 && chan->mode == L2CAP_MODE_ERTM) { +		l2cap_pi(sk)->rx_busy_skb = skb; +		l2cap_chan_busy(chan, 1);  		err = 0;  	} @@ -1039,7 +1269,7 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)  		if (parent) {  			bt_accept_unlink(sk); -			parent->sk_data_ready(parent, 0); +			parent->sk_data_ready(parent);  		} else {  			sk->sk_state_change(sk);  		} @@ -1050,26 +1280,33 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)  	release_sock(sk);  } -static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state) +static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state, +				       int err)  {  	struct sock *sk = chan->data;  	sk->sk_state = state; + +	if (err) +		sk->sk_err = err;  }  static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,  					       unsigned long len, int nb)  { +	struct sock *sk = chan->data;  	struct sk_buff *skb;  	int err;  	l2cap_chan_unlock(chan); -	skb = bt_skb_send_alloc(chan->sk, len, nb, &err); +	skb = bt_skb_send_alloc(sk, len, nb, &err);  	l2cap_chan_lock(chan);  	if (!skb)  		return ERR_PTR(err); +	bt_cb(skb)->chan = chan; +  	return skb;  } @@ -1088,18 +1325,54 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)  	sk->sk_state_change(sk);  	if (parent) -		parent->sk_data_ready(parent, 0); +		parent->sk_data_ready(parent);  	release_sock(sk);  }  static void l2cap_sock_defer_cb(struct l2cap_chan *chan)  { -	struct sock *sk = chan->data; -	struct sock *parent = bt_sk(sk)->parent; +	struct sock *parent, *sk = chan->data; +	lock_sock(sk); + +	parent = bt_sk(sk)->parent;  	if (parent) -		parent->sk_data_ready(parent, 0); +		parent->sk_data_ready(parent); + +	release_sock(sk); +} + +static void l2cap_sock_resume_cb(struct l2cap_chan *chan) +{ +	struct sock *sk = chan->data; + +	clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); +	sk->sk_state_change(sk); +} + +static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan) +{ +	struct sock *sk = chan->data; + +	lock_sock(sk); +	sk->sk_shutdown = SHUTDOWN_MASK; +	release_sock(sk); +} + +static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan) +{ +	struct sock *sk = chan->data; + +	return sk->sk_sndtimeo; +} + +static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) +{ +	struct sock *sk = chan->data; + +	set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); +	sk->sk_state_change(sk);  }  static struct l2cap_ops l2cap_chan_ops = { 
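Taken together, the l2cap_sock.c changes expose LE connection-oriented channels to user space: binding or connecting with an LE address type and a PSM selects L2CAP_MODE_LE_FLOWCTL, and the new BT_RCVMTU/BT_SNDMTU socket options carry the local and remote MTUs. A minimal user-space sketch of that flow follows; it assumes the BlueZ <bluetooth/bluetooth.h> and <bluetooth/l2cap.h> headers (for sockaddr_l2, BDADDR_LE_PUBLIC, BT_RCVMTU and friends), and the PSM 0x0080 and peer address are purely illustrative. Because the LE_FLOWCTL selection in l2cap_sock_connect() keys off chan->src_type, the socket is bound with an LE address type before connecting.

/* Illustrative sketch only, not part of this patch.
 * Build (assuming libbluetooth for str2ba): gcc -o le-coc le-coc.c -lbluetooth
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int main(void)
{
	struct sockaddr_l2 src, dst;
	uint32_t rcvmtu = 512;		/* becomes chan->imtu */
	uint16_t sndmtu = 0;
	socklen_t len = sizeof(sndmtu);
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	/* Bind with an LE source address type so the kernel picks
	 * L2CAP_MODE_LE_FLOWCTL when we later connect with a PSM.
	 */
	memset(&src, 0, sizeof(src));
	src.l2_family = AF_BLUETOOTH;
	src.l2_bdaddr_type = BDADDR_LE_PUBLIC;
	bacpy(&src.l2_bdaddr, BDADDR_ANY);
	if (bind(sk, (struct sockaddr *) &src, sizeof(src)) < 0) {
		perror("bind");
		goto fail;
	}

	/* Our receive MTU; only settable while the socket is unconnected. */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_RCVMTU, &rcvmtu, sizeof(rcvmtu)) < 0)
		perror("setsockopt(BT_RCVMTU)");

	memset(&dst, 0, sizeof(dst));
	dst.l2_family = AF_BLUETOOTH;
	dst.l2_psm = htobs(0x0080);			/* illustrative LE PSM */
	dst.l2_bdaddr_type = BDADDR_LE_PUBLIC;
	str2ba("00:11:22:33:44:55", &dst.l2_bdaddr);	/* illustrative peer */

	if (connect(sk, (struct sockaddr *) &dst, sizeof(dst)) < 0) {
		perror("connect");
		goto fail;
	}

	/* The remote's MTU from its LE connection response (chan->omtu). */
	if (getsockopt(sk, SOL_BLUETOOTH, BT_SNDMTU, &sndmtu, &len) == 0)
		printf("peer MTU %u\n", sndmtu);

	close(sk);
	return 0;

fail:
	close(sk);
	return 1;
}

BT_SNDMTU is read-only and reflects what the remote advertised in its LE connection response, while BT_RCVMTU must be set before the channel is connected; the setsockopt handler above returns -EISCONN afterwards.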
@@ -1111,6 +1384,10 @@ static struct l2cap_ops l2cap_chan_ops = {  	.state_change	= l2cap_sock_state_change_cb,  	.ready		= l2cap_sock_ready_cb,  	.defer		= l2cap_sock_defer_cb, +	.resume		= l2cap_sock_resume_cb, +	.suspend	= l2cap_sock_suspend_cb, +	.set_shutdown	= l2cap_sock_set_shutdown_cb, +	.get_sndtimeo	= l2cap_sock_get_sndtimeo_cb,  	.alloc_skb	= l2cap_sock_alloc_skb_cb,  }; @@ -1120,6 +1397,7 @@ static void l2cap_sock_destruct(struct sock *sk)  	if (l2cap_pi(sk)->chan)  		l2cap_chan_put(l2cap_pi(sk)->chan); +  	if (l2cap_pi(sk)->rx_busy_skb) {  		kfree_skb(l2cap_pi(sk)->rx_busy_skb);  		l2cap_pi(sk)->rx_busy_skb = NULL; @@ -1129,10 +1407,22 @@ static void l2cap_sock_destruct(struct sock *sk)  	skb_queue_purge(&sk->sk_write_queue);  } +static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name, +			       int *msg_namelen) +{ +	DECLARE_SOCKADDR(struct sockaddr_l2 *, la, msg_name); + +	memset(la, 0, sizeof(struct sockaddr_l2)); +	la->l2_family = AF_BLUETOOTH; +	la->l2_psm = bt_cb(skb)->psm; +	bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr); + +	*msg_namelen = sizeof(struct sockaddr_l2); +} +  static void l2cap_sock_init(struct sock *sk, struct sock *parent)  { -	struct l2cap_pinfo *pi = l2cap_pi(sk); -	struct l2cap_chan *chan = pi->chan; +	struct l2cap_chan *chan = l2cap_pi(sk)->chan;  	BT_DBG("sk %p", sk); @@ -1153,16 +1443,23 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)  		chan->tx_win_max = pchan->tx_win_max;  		chan->sec_level = pchan->sec_level;  		chan->flags = pchan->flags; +		chan->tx_credits = pchan->tx_credits; +		chan->rx_credits = pchan->rx_credits; + +		if (chan->chan_type == L2CAP_CHAN_FIXED) { +			chan->scid = pchan->scid; +			chan->dcid = pchan->scid; +		}  		security_sk_clone(parent, sk);  	} else { -  		switch (sk->sk_type) {  		case SOCK_RAW:  			chan->chan_type = L2CAP_CHAN_RAW;  			break;  		case SOCK_DGRAM:  			chan->chan_type = L2CAP_CHAN_CONN_LESS; +			bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name;  			break;  		case SOCK_SEQPACKET:  		case SOCK_STREAM: @@ -1224,8 +1521,6 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,  	l2cap_chan_hold(chan); -	chan->sk = sk; -  	l2cap_pi(sk)->chan = chan;  	return sk; diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index b3fbc73516c..941ad7530ed 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c @@ -58,6 +58,7 @@ int bt_to_errno(__u16 code)  		return EIO;  	case 0x04: +	case 0x3c:  		return EHOSTDOWN;  	case 0x05: diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index fedc5399d46..af8e0a6243b 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -29,13 +29,13 @@  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h>  #include <net/bluetooth/mgmt.h> -#include <net/bluetooth/smp.h> -bool enable_hs; +#include "smp.h"  #define MGMT_VERSION	1 -#define MGMT_REVISION	3 +#define MGMT_REVISION	6  static const u16 mgmt_commands[] = {  	MGMT_OP_READ_INDEX_LIST, @@ -76,6 +76,15 @@ static const u16 mgmt_commands[] = {  	MGMT_OP_BLOCK_DEVICE,  	MGMT_OP_UNBLOCK_DEVICE,  	MGMT_OP_SET_DEVICE_ID, +	MGMT_OP_SET_ADVERTISING, +	MGMT_OP_SET_BREDR, +	MGMT_OP_SET_STATIC_ADDRESS, +	MGMT_OP_SET_SCAN_PARAMS, +	MGMT_OP_SET_SECURE_CONN, +	MGMT_OP_SET_DEBUG_KEYS, +	MGMT_OP_SET_PRIVACY, +	MGMT_OP_LOAD_IRKS, +	MGMT_OP_GET_CONN_INFO,  };  static const u16 mgmt_events[] = { @@ -100,6 +109,8 @@ static const u16 mgmt_events[] = {  	MGMT_EV_DEVICE_UNBLOCKED,  	MGMT_EV_DEVICE_UNPAIRED,  	MGMT_EV_PASSKEY_NOTIFY, +	
MGMT_EV_NEW_IRK, +	MGMT_EV_NEW_CSRK,  };  #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000) @@ -124,7 +135,7 @@ static u8 mgmt_status_table[] = {  	MGMT_STATUS_FAILED,		/* Hardware Failure */  	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */  	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */ -	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */ +	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */  	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */  	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */  	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */ @@ -181,11 +192,6 @@ static u8 mgmt_status_table[] = {  	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */  }; -bool mgmt_valid_hdev(struct hci_dev *hdev) -{ -	return hdev->dev_type == HCI_BREDR; -} -  static u8 mgmt_status(u8 hci_status)  {  	if (hci_status < ARRAY_SIZE(mgmt_status_table)) @@ -209,7 +215,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)  	hdr = (void *) skb_put(skb, sizeof(*hdr)); -	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS); +	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);  	hdr->index = cpu_to_le16(index);  	hdr->len = cpu_to_le16(sizeof(*ev)); @@ -240,7 +246,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,  	hdr = (void *) skb_put(skb, sizeof(*hdr)); -	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE); +	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);  	hdr->index = cpu_to_le16(index);  	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); @@ -266,7 +272,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,  	BT_DBG("sock %p", sk);  	rp.version = MGMT_VERSION; -	rp.revision = __constant_cpu_to_le16(MGMT_REVISION); +	rp.revision = cpu_to_le16(MGMT_REVISION);  	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,  			    sizeof(rp)); @@ -290,8 +296,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,  	if (!rp)  		return -ENOMEM; -	rp->num_commands = __constant_cpu_to_le16(num_commands); -	rp->num_events = __constant_cpu_to_le16(num_events); +	rp->num_commands = cpu_to_le16(num_commands); +	rp->num_events = cpu_to_le16(num_events);  	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)  		put_unaligned_le16(mgmt_commands[i], opcode); @@ -321,10 +327,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,  	count = 0;  	list_for_each_entry(d, &hci_dev_list, list) { -		if (!mgmt_valid_hdev(d)) -			continue; - -		count++; +		if (d->dev_type == HCI_BREDR) +			count++;  	}  	rp_len = sizeof(*rp) + (2 * count); @@ -339,11 +343,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,  		if (test_bit(HCI_SETUP, &d->dev_flags))  			continue; -		if (!mgmt_valid_hdev(d)) +		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))  			continue; -		rp->index[count++] = cpu_to_le16(d->id); -		BT_DBG("Added hci%u", d->id); +		if (d->dev_type == HCI_BREDR) { +			rp->index[count++] = cpu_to_le16(d->id); +			BT_DBG("Added hci%u", d->id); +		}  	}  	rp->num_controllers = cpu_to_le16(count); @@ -365,9 +371,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)  	settings |= MGMT_SETTING_POWERED;  	settings |= MGMT_SETTING_PAIRABLE; - -	if (lmp_ssp_capable(hdev)) -		settings |= MGMT_SETTING_SSP; +	settings |= MGMT_SETTING_DEBUG_KEYS;  	if (lmp_bredr_capable(hdev)) {  		settings |= MGMT_SETTING_CONNECTABLE; @@ -376,13 +380,22 @@ static u32 get_supported_settings(struct hci_dev *hdev)  		settings |= MGMT_SETTING_DISCOVERABLE;  		settings |= 
MGMT_SETTING_BREDR;  		settings |= MGMT_SETTING_LINK_SECURITY; -	} -	if (enable_hs) -		settings |= MGMT_SETTING_HS; +		if (lmp_ssp_capable(hdev)) { +			settings |= MGMT_SETTING_SSP; +			settings |= MGMT_SETTING_HS; +		} -	if (lmp_le_capable(hdev)) +		if (lmp_sc_capable(hdev) || +		    test_bit(HCI_FORCE_SC, &hdev->dev_flags)) +			settings |= MGMT_SETTING_SECURE_CONN; +	} + +	if (lmp_le_capable(hdev)) {  		settings |= MGMT_SETTING_LE; +		settings |= MGMT_SETTING_ADVERTISING; +		settings |= MGMT_SETTING_PRIVACY; +	}  	return settings;  } @@ -406,7 +419,7 @@ static u32 get_current_settings(struct hci_dev *hdev)  	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))  		settings |= MGMT_SETTING_PAIRABLE; -	if (lmp_bredr_capable(hdev)) +	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))  		settings |= MGMT_SETTING_BREDR;  	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) @@ -421,6 +434,18 @@ static u32 get_current_settings(struct hci_dev *hdev)  	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))  		settings |= MGMT_SETTING_HS; +	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) +		settings |= MGMT_SETTING_ADVERTISING; + +	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) +		settings |= MGMT_SETTING_SECURE_CONN; + +	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags)) +		settings |= MGMT_SETTING_DEBUG_KEYS; + +	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) +		settings |= MGMT_SETTING_PRIVACY; +  	return settings;  } @@ -534,6 +559,150 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)  	return ptr;  } +static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) +{ +	struct pending_cmd *cmd; + +	list_for_each_entry(cmd, &hdev->mgmt_pending, list) { +		if (cmd->opcode == opcode) +			return cmd; +	} + +	return NULL; +} + +static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) +{ +	u8 ad_len = 0; +	size_t name_len; + +	name_len = strlen(hdev->dev_name); +	if (name_len > 0) { +		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2; + +		if (name_len > max_len) { +			name_len = max_len; +			ptr[1] = EIR_NAME_SHORT; +		} else +			ptr[1] = EIR_NAME_COMPLETE; + +		ptr[0] = name_len + 1; + +		memcpy(ptr + 2, hdev->dev_name, name_len); + +		ad_len += (name_len + 2); +		ptr += (name_len + 2); +	} + +	return ad_len; +} + +static void update_scan_rsp_data(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	struct hci_cp_le_set_scan_rsp_data cp; +	u8 len; + +	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) +		return; + +	memset(&cp, 0, sizeof(cp)); + +	len = create_scan_rsp_data(hdev, cp.data); + +	if (hdev->scan_rsp_data_len == len && +	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0) +		return; + +	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); +	hdev->scan_rsp_data_len = len; + +	cp.length = len; + +	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); +} + +static u8 get_adv_discov_flags(struct hci_dev *hdev) +{ +	struct pending_cmd *cmd; + +	/* If there's a pending mgmt command the flags will not yet have +	 * their final values, so check for this first. 
+	 */ +	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); +	if (cmd) { +		struct mgmt_mode *cp = cmd->param; +		if (cp->val == 0x01) +			return LE_AD_GENERAL; +		else if (cp->val == 0x02) +			return LE_AD_LIMITED; +	} else { +		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags)) +			return LE_AD_LIMITED; +		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) +			return LE_AD_GENERAL; +	} + +	return 0; +} + +static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr) +{ +	u8 ad_len = 0, flags = 0; + +	flags |= get_adv_discov_flags(hdev); + +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +		flags |= LE_AD_NO_BREDR; + +	if (flags) { +		BT_DBG("adv flags 0x%02x", flags); + +		ptr[0] = 2; +		ptr[1] = EIR_FLAGS; +		ptr[2] = flags; + +		ad_len += 3; +		ptr += 3; +	} + +	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) { +		ptr[0] = 2; +		ptr[1] = EIR_TX_POWER; +		ptr[2] = (u8) hdev->adv_tx_power; + +		ad_len += 3; +		ptr += 3; +	} + +	return ad_len; +} + +static void update_adv_data(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	struct hci_cp_le_set_adv_data cp; +	u8 len; + +	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) +		return; + +	memset(&cp, 0, sizeof(cp)); + +	len = create_adv_data(hdev, cp.data); + +	if (hdev->adv_data_len == len && +	    memcmp(cp.data, hdev->adv_data, len) == 0) +		return; + +	memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); +	hdev->adv_data_len = len; + +	cp.length = len; + +	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); +} +  static void create_eir(struct hci_dev *hdev, u8 *data)  {  	u8 *ptr = data; @@ -632,6 +801,9 @@ static void update_class(struct hci_request *req)  	if (!hdev_is_powered(hdev))  		return; +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +		return; +  	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))  		return; @@ -639,12 +811,73 @@ static void update_class(struct hci_request *req)  	cod[1] = hdev->major_class;  	cod[2] = get_service_classes(hdev); +	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags)) +		cod[1] |= 0x20; +  	if (memcmp(cod, hdev->dev_class, 3) == 0)  		return;  	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);  } +static bool get_connectable(struct hci_dev *hdev) +{ +	struct pending_cmd *cmd; + +	/* If there's a pending mgmt command the flag will not yet have +	 * it's final value, so check for this first. +	 */ +	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); +	if (cmd) { +		struct mgmt_mode *cp = cmd->param; +		return cp->val; +	} + +	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags); +} + +static void enable_advertising(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	struct hci_cp_le_set_adv_param cp; +	u8 own_addr_type, enable = 0x01; +	bool connectable; + +	/* Clear the HCI_ADVERTISING bit temporarily so that the +	 * hci_update_random_address knows that it's safe to go ahead +	 * and write a new random address. The flag will be set back on +	 * as soon as the SET_ADV_ENABLE HCI command completes. +	 */ +	clear_bit(HCI_ADVERTISING, &hdev->dev_flags); + +	connectable = get_connectable(hdev); + +	/* Set require_privacy to true only when non-connectable +	 * advertising is used. In that case it is fine to use a +	 * non-resolvable private address. +	 */ +	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0) +		return; + +	memset(&cp, 0, sizeof(cp)); +	cp.min_interval = cpu_to_le16(0x0800); +	cp.max_interval = cpu_to_le16(0x0800); +	cp.type = connectable ? 
LE_ADV_IND : LE_ADV_NONCONN_IND; +	cp.own_address_type = own_addr_type; +	cp.channel_map = hdev->le_adv_channel_map; + +	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); + +	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); +} + +static void disable_advertising(struct hci_request *req) +{ +	u8 enable = 0x00; + +	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); +} +  static void service_cache_off(struct work_struct *work)  {  	struct hci_dev *hdev = container_of(work, struct hci_dev, @@ -666,12 +899,39 @@ static void service_cache_off(struct work_struct *work)  	hci_req_run(&req, NULL);  } +static void rpa_expired(struct work_struct *work) +{ +	struct hci_dev *hdev = container_of(work, struct hci_dev, +					    rpa_expired.work); +	struct hci_request req; + +	BT_DBG(""); + +	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + +	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) || +	    hci_conn_num(hdev, LE_LINK) > 0) +		return; + +	/* The generation of a new RPA and programming it into the +	 * controller happens in the enable_advertising() function. +	 */ + +	hci_req_init(&req, hdev); + +	disable_advertising(&req); +	enable_advertising(&req); + +	hci_req_run(&req, NULL); +} +  static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)  {  	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))  		return;  	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); +	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);  	/* Non-mgmt controlled devices get this bit set  	 * implicitly so that pairing works for them, however @@ -763,18 +1023,6 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,  	}  } -static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) -{ -	struct pending_cmd *cmd; - -	list_for_each_entry(cmd, &hdev->mgmt_pending, list) { -		if (cmd->opcode == opcode) -			return cmd; -	} - -	return NULL; -} -  static void mgmt_pending_remove(struct pending_cmd *cmd)  {  	list_del(&cmd->list); @@ -789,6 +1037,106 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)  			    sizeof(settings));  } +static void clean_up_hci_complete(struct hci_dev *hdev, u8 status) +{ +	BT_DBG("%s status 0x%02x", hdev->name, status); + +	if (hci_conn_count(hdev) == 0) { +		cancel_delayed_work(&hdev->power_off); +		queue_work(hdev->req_workqueue, &hdev->power_off.work); +	} +} + +static void hci_stop_discovery(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	struct hci_cp_remote_name_req_cancel cp; +	struct inquiry_entry *e; + +	switch (hdev->discovery.state) { +	case DISCOVERY_FINDING: +		if (test_bit(HCI_INQUIRY, &hdev->flags)) { +			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); +		} else { +			cancel_delayed_work(&hdev->le_scan_disable); +			hci_req_add_le_scan_disable(req); +		} + +		break; + +	case DISCOVERY_RESOLVING: +		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, +						     NAME_PENDING); +		if (!e) +			return; + +		bacpy(&cp.bdaddr, &e->data.bdaddr); +		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), +			    &cp); + +		break; + +	default: +		/* Passive scanning */ +		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) +			hci_req_add_le_scan_disable(req); +		break; +	} +} + +static int clean_up_hci_state(struct hci_dev *hdev) +{ +	struct hci_request req; +	struct hci_conn *conn; + +	hci_req_init(&req, hdev); + +	if (test_bit(HCI_ISCAN, &hdev->flags) || +	    test_bit(HCI_PSCAN, &hdev->flags)) { +		u8 scan = 0x00; +		hci_req_add(&req, 
HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +	} + +	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) +		disable_advertising(&req); + +	hci_stop_discovery(&req); + +	list_for_each_entry(conn, &hdev->conn_hash.list, list) { +		struct hci_cp_disconnect dc; +		struct hci_cp_reject_conn_req rej; + +		switch (conn->state) { +		case BT_CONNECTED: +		case BT_CONFIG: +			dc.handle = cpu_to_le16(conn->handle); +			dc.reason = 0x15; /* Terminated due to Power Off */ +			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc); +			break; +		case BT_CONNECT: +			if (conn->type == LE_LINK) +				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL, +					    0, NULL); +			else if (conn->type == ACL_LINK) +				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL, +					    6, &conn->dst); +			break; +		case BT_CONNECT2: +			bacpy(&rej.bdaddr, &conn->dst); +			rej.reason = 0x15; /* Terminated due to Power Off */ +			if (conn->type == ACL_LINK) +				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ, +					    sizeof(rej), &rej); +			else if (conn->type == SCO_LINK) +				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ, +					    sizeof(rej), &rej); +			break; +		} +	} + +	return hci_req_run(&req, clean_up_hci_complete); +} +  static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,  		       u16 len)  { @@ -804,6 +1152,12 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,  	hci_dev_lock(hdev); +	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { +		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, +				 MGMT_STATUS_BUSY); +		goto failed; +	} +  	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {  		cancel_delayed_work(&hdev->power_off); @@ -820,24 +1174,29 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,  		goto failed;  	} -	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { -		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, -				 MGMT_STATUS_BUSY); -		goto failed; -	} -  	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);  	if (!cmd) {  		err = -ENOMEM;  		goto failed;  	} -	if (cp->val) +	if (cp->val) {  		queue_work(hdev->req_workqueue, &hdev->power_on); -	else -		queue_work(hdev->req_workqueue, &hdev->power_off.work); - -	err = 0; +		err = 0; +	} else { +		/* Disconnect connections, stop scans, etc */ +		err = clean_up_hci_state(hdev); +		if (!err) +			queue_delayed_work(hdev->req_workqueue, &hdev->power_off, +					   HCI_POWER_OFF_TIMEOUT); + +		/* ENODATA means there were no HCI commands queued */ +		if (err == -ENODATA) { +			cancel_delayed_work(&hdev->power_off); +			queue_work(hdev->req_workqueue, &hdev->power_off.work); +			err = 0; +		} +	}  failed:  	hci_dev_unlock(hdev); @@ -859,7 +1218,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,  	if (hdev)  		hdr->index = cpu_to_le16(hdev->id);  	else -		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE); +		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);  	hdr->len = cpu_to_le16(data_len);  	if (data) @@ -883,27 +1242,141 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)  	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);  } +struct cmd_lookup { +	struct sock *sk; +	struct hci_dev *hdev; +	u8 mgmt_status; +}; + +static void settings_rsp(struct pending_cmd *cmd, void *data) +{ +	struct cmd_lookup *match = data; + +	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); + +	list_del(&cmd->list); + +	if (match->sk == NULL) { +		match->sk = cmd->sk; +		sock_hold(match->sk); +	} + +	mgmt_pending_free(cmd); +} + +static void 
cmd_status_rsp(struct pending_cmd *cmd, void *data) +{ +	u8 *status = data; + +	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); +	mgmt_pending_remove(cmd); +} + +static u8 mgmt_bredr_support(struct hci_dev *hdev) +{ +	if (!lmp_bredr_capable(hdev)) +		return MGMT_STATUS_NOT_SUPPORTED; +	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +		return MGMT_STATUS_REJECTED; +	else +		return MGMT_STATUS_SUCCESS; +} + +static u8 mgmt_le_support(struct hci_dev *hdev) +{ +	if (!lmp_le_capable(hdev)) +		return MGMT_STATUS_NOT_SUPPORTED; +	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) +		return MGMT_STATUS_REJECTED; +	else +		return MGMT_STATUS_SUCCESS; +} + +static void set_discoverable_complete(struct hci_dev *hdev, u8 status) +{ +	struct pending_cmd *cmd; +	struct mgmt_mode *cp; +	struct hci_request req; +	bool changed; + +	BT_DBG("status 0x%02x", status); + +	hci_dev_lock(hdev); + +	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); +	if (!cmd) +		goto unlock; + +	if (status) { +		u8 mgmt_err = mgmt_status(status); +		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); +		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); +		goto remove_cmd; +	} + +	cp = cmd->param; +	if (cp->val) { +		changed = !test_and_set_bit(HCI_DISCOVERABLE, +					    &hdev->dev_flags); + +		if (hdev->discov_timeout > 0) { +			int to = msecs_to_jiffies(hdev->discov_timeout * 1000); +			queue_delayed_work(hdev->workqueue, &hdev->discov_off, +					   to); +		} +	} else { +		changed = test_and_clear_bit(HCI_DISCOVERABLE, +					     &hdev->dev_flags); +	} + +	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); + +	if (changed) +		new_settings(hdev, cmd->sk); + +	/* When the discoverable mode gets changed, make sure +	 * that class of device has the limited discoverable +	 * bit correctly set. +	 */ +	hci_req_init(&req, hdev); +	update_class(&req); +	hci_req_run(&req, NULL); + +remove_cmd: +	mgmt_pending_remove(cmd); + +unlock: +	hci_dev_unlock(hdev); +} +  static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,  			    u16 len)  {  	struct mgmt_cp_set_discoverable *cp = data;  	struct pending_cmd *cmd; +	struct hci_request req;  	u16 timeout;  	u8 scan;  	int err;  	BT_DBG("request for %s", hdev->name); -	if (!lmp_bredr_capable(hdev)) +	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && +	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))  		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, -				 MGMT_STATUS_NOT_SUPPORTED); +				  MGMT_STATUS_REJECTED); -	if (cp->val != 0x00 && cp->val != 0x01) +	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)  		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,  				  MGMT_STATUS_INVALID_PARAMS);  	timeout = __le16_to_cpu(cp->timeout); -	if (!cp->val && timeout > 0) + +	/* Disabling discoverable requires that no timeout is set, +	 * and enabling limited discoverable requires a timeout. +	 */ +	if ((cp->val == 0x00 && timeout > 0) || +	    (cp->val == 0x02 && timeout == 0))  		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,  				  MGMT_STATUS_INVALID_PARAMS); @@ -931,6 +1404,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,  	if (!hdev_is_powered(hdev)) {  		bool changed = false; +		/* Setting limited discoverable when powered off is +		 * not a valid operation since it requires a timeout +		 * and so no need to check HCI_LIMITED_DISCOVERABLE. 
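
The create_adv_data()/get_adv_discov_flags() pair earlier in this file packs the discoverable state into a single AD "Flags" element (length, type, value). A minimal userspace sketch of that packing follows; the macro and function names are local to the sketch, while the type 0x01 and the bit values (limited 0x01, general 0x02, BR/EDR-not-supported 0x04) are the standard Flags definitions from the Core Specification.

#include <stdint.h>
#include <stdio.h>

#define AD_TYPE_FLAGS        0x01
#define AD_FLAG_LE_LIMITED   0x01
#define AD_FLAG_LE_GENERAL   0x02
#define AD_FLAG_NO_BREDR     0x04

/* Build the 3-byte Flags element the same way create_adv_data() does:
 * buf[0] = payload length (type + value), buf[1] = type, buf[2] = flags. */
static size_t build_flags_ad(uint8_t *buf, int limited, int general,
			     int bredr_enabled)
{
	uint8_t flags = 0;

	if (limited)
		flags |= AD_FLAG_LE_LIMITED;
	else if (general)
		flags |= AD_FLAG_LE_GENERAL;

	if (!bredr_enabled)
		flags |= AD_FLAG_NO_BREDR;

	if (!flags)
		return 0;	/* nothing to signal, element is omitted */

	buf[0] = 2;		/* type octet + one flags octet */
	buf[1] = AD_TYPE_FLAGS;
	buf[2] = flags;
	return 3;
}

int main(void)
{
	uint8_t ad[3];
	size_t len = build_flags_ad(ad, 0, 1, 0); /* general discoverable, LE-only */

	for (size_t i = 0; i < len; i++)
		printf("%02x ", ad[i]);
	printf("\n");	/* prints: 02 01 06 */
	return 0;
}
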
+		 */  		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {  			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);  			changed = true; @@ -946,16 +1423,20 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,  		goto failed;  	} -	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) { -		if (hdev->discov_timeout > 0) { -			cancel_delayed_work(&hdev->discov_off); -			hdev->discov_timeout = 0; -		} +	/* If the current mode is the same, then just update the timeout +	 * value with the new value. And if only the timeout gets updated, +	 * then no need for any HCI transactions. +	 */ +	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) && +	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE, +					  &hdev->dev_flags)) { +		cancel_delayed_work(&hdev->discov_off); +		hdev->discov_timeout = timeout; -		if (cp->val && timeout > 0) { -			hdev->discov_timeout = timeout; +		if (cp->val && hdev->discov_timeout > 0) { +			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);  			queue_delayed_work(hdev->workqueue, &hdev->discov_off, -				msecs_to_jiffies(hdev->discov_timeout * 1000)); +					   to);  		}  		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); @@ -968,20 +1449,66 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,  		goto failed;  	} +	/* Cancel any potential discoverable timeout that might be +	 * still active and store new timeout value. The arming of +	 * the timeout happens in the complete handler. +	 */ +	cancel_delayed_work(&hdev->discov_off); +	hdev->discov_timeout = timeout; + +	/* Limited discoverable mode */ +	if (cp->val == 0x02) +		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); +	else +		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); + +	hci_req_init(&req, hdev); + +	/* The procedure for LE-only controllers is much simpler - just +	 * update the advertising data. 
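
The parameter checks in set_discoverable() above boil down to a small rule: the mode byte is 0x00/0x01/0x02, disabling requires a zero timeout, and limited discoverable requires a non-zero timeout. A sketch of the same validation as a caller might apply it before sending the command; the struct name is local to this sketch, and the wire layout (one-byte mode plus little-endian 16-bit timeout in seconds) is as described in mgmt-api.txt of this period.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct set_discoverable_cp {
	uint8_t  val;		/* 0x00 off, 0x01 general, 0x02 limited */
	uint16_t timeout;	/* seconds, little-endian on the wire */
} __attribute__((packed));

/* Mirror of the checks in set_discoverable(). */
static bool discoverable_params_valid(const struct set_discoverable_cp *cp)
{
	if (cp->val > 0x02)
		return false;
	if (cp->val == 0x00 && cp->timeout > 0)
		return false;
	if (cp->val == 0x02 && cp->timeout == 0)
		return false;
	return true;
}

int main(void)
{
	struct set_discoverable_cp limited = { 0x02, 30 };	/* 30 s limited */
	struct set_discoverable_cp bad     = { 0x00, 10 };	/* off + timeout */

	printf("%d %d\n", discoverable_params_valid(&limited),
	       discoverable_params_valid(&bad));		/* prints: 1 0 */
	return 0;
}
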
+	 */ +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +		goto update_ad; +  	scan = SCAN_PAGE; -	if (cp->val) +	if (cp->val) { +		struct hci_cp_write_current_iac_lap hci_cp; + +		if (cp->val == 0x02) { +			/* Limited discoverable mode */ +			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2); +			hci_cp.iac_lap[0] = 0x00;	/* LIAC */ +			hci_cp.iac_lap[1] = 0x8b; +			hci_cp.iac_lap[2] = 0x9e; +			hci_cp.iac_lap[3] = 0x33;	/* GIAC */ +			hci_cp.iac_lap[4] = 0x8b; +			hci_cp.iac_lap[5] = 0x9e; +		} else { +			/* General discoverable mode */ +			hci_cp.num_iac = 1; +			hci_cp.iac_lap[0] = 0x33;	/* GIAC */ +			hci_cp.iac_lap[1] = 0x8b; +			hci_cp.iac_lap[2] = 0x9e; +		} + +		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP, +			    (hci_cp.num_iac * 3) + 1, &hci_cp); +  		scan |= SCAN_INQUIRY; -	else -		cancel_delayed_work(&hdev->discov_off); +	} else { +		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); +	} + +	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); + +update_ad: +	update_adv_data(&req); -	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +	err = hci_req_run(&req, set_discoverable_complete);  	if (err < 0)  		mgmt_pending_remove(cmd); -	if (cp->val) -		hdev->discov_timeout = timeout; -  failed:  	hci_dev_unlock(hdev);  	return err; @@ -993,6 +1520,9 @@ static void write_fast_connectable(struct hci_request *req, bool enable)  	struct hci_cp_write_page_scan_activity acp;  	u8 type; +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +		return; +  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)  		return; @@ -1000,15 +1530,15 @@ static void write_fast_connectable(struct hci_request *req, bool enable)  		type = PAGE_SCAN_TYPE_INTERLACED;  		/* 160 msec page scan interval */ -		acp.interval = __constant_cpu_to_le16(0x0100); +		acp.interval = cpu_to_le16(0x0100);  	} else {  		type = PAGE_SCAN_TYPE_STANDARD;	/* default */  		/* default 1.28 sec page scan */ -		acp.interval = __constant_cpu_to_le16(0x0800); +		acp.interval = cpu_to_le16(0x0800);  	} -	acp.window = __constant_cpu_to_le16(0x0012); +	acp.window = cpu_to_le16(0x0012);  	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||  	    __cpu_to_le16(hdev->page_scan_window) != acp.window) @@ -1022,6 +1552,8 @@ static void write_fast_connectable(struct hci_request *req, bool enable)  static void set_connectable_complete(struct hci_dev *hdev, u8 status)  {  	struct pending_cmd *cmd; +	struct mgmt_mode *cp; +	bool changed;  	BT_DBG("status 0x%02x", status); @@ -1031,14 +1563,56 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)  	if (!cmd)  		goto unlock; +	if (status) { +		u8 mgmt_err = mgmt_status(status); +		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); +		goto remove_cmd; +	} + +	cp = cmd->param; +	if (cp->val) +		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags); +	else +		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags); +  	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); +	if (changed) +		new_settings(hdev, cmd->sk); + +remove_cmd:  	mgmt_pending_remove(cmd);  unlock:  	hci_dev_unlock(hdev);  } +static int set_connectable_update_settings(struct hci_dev *hdev, +					   struct sock *sk, u8 val) +{ +	bool changed = false; +	int err; + +	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) +		changed = true; + +	if (val) { +		set_bit(HCI_CONNECTABLE, &hdev->dev_flags); +	} else { +		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags); +		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); +	} + +	err = send_settings_rsp(sk, 
MGMT_OP_SET_CONNECTABLE, hdev); +	if (err < 0) +		return err; + +	if (changed) +		return new_settings(hdev, sk); + +	return 0; +} +  static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,  			   u16 len)  { @@ -1050,9 +1624,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,  	BT_DBG("request for %s", hdev->name); -	if (!lmp_bredr_capable(hdev)) +	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && +	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))  		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, -				  MGMT_STATUS_NOT_SUPPORTED); +				  MGMT_STATUS_REJECTED);  	if (cp->val != 0x00 && cp->val != 0x01)  		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, @@ -1061,25 +1636,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,  	hci_dev_lock(hdev);  	if (!hdev_is_powered(hdev)) { -		bool changed = false; - -		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) -			changed = true; - -		if (cp->val) { -			set_bit(HCI_CONNECTABLE, &hdev->dev_flags); -		} else { -			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags); -			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); -		} - -		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); -		if (err < 0) -			goto failed; - -		if (changed) -			err = new_settings(hdev, sk); - +		err = set_connectable_update_settings(hdev, sk, cp->val);  		goto failed;  	} @@ -1090,30 +1647,37 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,  		goto failed;  	} -	if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { -		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); -		goto failed; -	} -  	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);  	if (!cmd) {  		err = -ENOMEM;  		goto failed;  	} -	if (cp->val) { -		scan = SCAN_PAGE; -	} else { -		scan = 0; +	hci_req_init(&req, hdev); -		if (test_bit(HCI_ISCAN, &hdev->flags) && -		    hdev->discov_timeout > 0) -			cancel_delayed_work(&hdev->discov_off); -	} +	/* If BR/EDR is not enabled and we disable advertising as a +	 * by-product of disabling connectable, we need to update the +	 * advertising flags. 
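
The Write Current IAC LAP programming a little earlier in set_discoverable() is where the byte triplets come from: the GIAC is LAP 0x9E8B33 and the LIAC is 0x9E8B00, both sent little-endian, so limited discoverable answers to LIAC plus GIAC while general answers to GIAC only. A small sketch of filling the same parameter block; the function and array names are local to the sketch.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t GIAC[3] = { 0x33, 0x8b, 0x9e };	/* 0x9E8B33 */
static const uint8_t LIAC[3] = { 0x00, 0x8b, 0x9e };	/* 0x9E8B00 */

/* Fill num_iac + iac_lap[] the way set_discoverable() does and return the
 * number of parameter bytes (num_iac * 3 + 1). */
static size_t fill_iac_lap(uint8_t *buf, int limited, uint8_t num_supported)
{
	if (limited) {
		/* LIAC first, GIAC second, capped at what the controller
		 * supports (the same min_t() as in the hunk above). */
		buf[0] = num_supported < 2 ? num_supported : 2;
		memcpy(&buf[1], LIAC, 3);
		memcpy(&buf[4], GIAC, 3);
	} else {
		buf[0] = 1;
		memcpy(&buf[1], GIAC, 3);
	}

	return (size_t)buf[0] * 3 + 1;
}

int main(void)
{
	uint8_t buf[7];
	size_t len = fill_iac_lap(buf, 1, 2);

	for (size_t i = 0; i < len; i++)
		printf("%02x ", buf[i]);	/* 02 00 8b 9e 33 8b 9e */
	printf("\n");
	return 0;
}
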
+	 */ +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		if (!cp->val) { +			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); +			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); +		} +		update_adv_data(&req); +	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) { +		if (cp->val) { +			scan = SCAN_PAGE; +		} else { +			scan = 0; -	hci_req_init(&req, hdev); +			if (test_bit(HCI_ISCAN, &hdev->flags) && +			    hdev->discov_timeout > 0) +				cancel_delayed_work(&hdev->discov_off); +		} -	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +	}  	/* If we're going from non-connectable to connectable or  	 * vice-versa when fast connectable is enabled ensure that fast @@ -1124,9 +1688,20 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,  	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))  		write_fast_connectable(&req, false); +	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) && +	    hci_conn_num(hdev, LE_LINK) == 0) { +		disable_advertising(&req); +		enable_advertising(&req); +	} +  	err = hci_req_run(&req, set_connectable_complete); -	if (err < 0) +	if (err < 0) {  		mgmt_pending_remove(cmd); +		if (err == -ENODATA) +			err = set_connectable_update_settings(hdev, sk, +							      cp->val); +		goto failed; +	}  failed:  	hci_dev_unlock(hdev); @@ -1137,6 +1712,7 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,  			u16 len)  {  	struct mgmt_mode *cp = data; +	bool changed;  	int err;  	BT_DBG("request for %s", hdev->name); @@ -1148,17 +1724,18 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,  	hci_dev_lock(hdev);  	if (cp->val) -		set_bit(HCI_PAIRABLE, &hdev->dev_flags); +		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);  	else -		clear_bit(HCI_PAIRABLE, &hdev->dev_flags); +		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);  	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);  	if (err < 0) -		goto failed; +		goto unlock; -	err = new_settings(hdev, sk); +	if (changed) +		err = new_settings(hdev, sk); -failed: +unlock:  	hci_dev_unlock(hdev);  	return err;  } @@ -1168,14 +1745,15 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,  {  	struct mgmt_mode *cp = data;  	struct pending_cmd *cmd; -	u8 val; +	u8 val, status;  	int err;  	BT_DBG("request for %s", hdev->name); -	if (!lmp_bredr_capable(hdev)) +	status = mgmt_bredr_support(hdev); +	if (status)  		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, -				  MGMT_STATUS_NOT_SUPPORTED); +				  status);  	if (cp->val != 0x00 && cp->val != 0x01)  		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, @@ -1236,11 +1814,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  {  	struct mgmt_mode *cp = data;  	struct pending_cmd *cmd; -	u8 val; +	u8 status;  	int err;  	BT_DBG("request for %s", hdev->name); +	status = mgmt_bredr_support(hdev); +	if (status) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); +  	if (!lmp_ssp_capable(hdev))  		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,  				  MGMT_STATUS_NOT_SUPPORTED); @@ -1251,14 +1833,20 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  	hci_dev_lock(hdev); -	val = !!cp->val; -  	if (!hdev_is_powered(hdev)) { -		bool changed = false; +		bool changed; -		if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { -			change_bit(HCI_SSP_ENABLED, &hdev->dev_flags); -			
changed = true; +		if (cp->val) { +			changed = !test_and_set_bit(HCI_SSP_ENABLED, +						    &hdev->dev_flags); +		} else { +			changed = test_and_clear_bit(HCI_SSP_ENABLED, +						     &hdev->dev_flags); +			if (!changed) +				changed = test_and_clear_bit(HCI_HS_ENABLED, +							     &hdev->dev_flags); +			else +				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);  		}  		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); @@ -1271,13 +1859,14 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  		goto failed;  	} -	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { +	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) || +	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {  		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,  				 MGMT_STATUS_BUSY);  		goto failed;  	} -	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) { +	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {  		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);  		goto failed;  	} @@ -1288,7 +1877,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  		goto failed;  	} -	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val); +	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);  	if (err < 0) {  		mgmt_pending_remove(cmd);  		goto failed; @@ -1302,23 +1891,90 @@ failed:  static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  {  	struct mgmt_mode *cp = data; +	bool changed; +	u8 status; +	int err;  	BT_DBG("request for %s", hdev->name); -	if (!enable_hs) +	status = mgmt_bredr_support(hdev); +	if (status) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); + +	if (!lmp_ssp_capable(hdev))  		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,  				  MGMT_STATUS_NOT_SUPPORTED); +	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, +				  MGMT_STATUS_REJECTED); +  	if (cp->val != 0x00 && cp->val != 0x01)  		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,  				  MGMT_STATUS_INVALID_PARAMS); -	if (cp->val) -		set_bit(HCI_HS_ENABLED, &hdev->dev_flags); -	else -		clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); +	hci_dev_lock(hdev); + +	if (cp->val) { +		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags); +	} else { +		if (hdev_is_powered(hdev)) { +			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS, +					 MGMT_STATUS_REJECTED); +			goto unlock; +		} + +		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); +	} + +	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); +	if (err < 0) +		goto unlock; + +	if (changed) +		err = new_settings(hdev, sk); + +unlock: +	hci_dev_unlock(hdev); +	return err; +} + +static void le_enable_complete(struct hci_dev *hdev, u8 status) +{ +	struct cmd_lookup match = { NULL, hdev }; -	return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); +	if (status) { +		u8 mgmt_err = mgmt_status(status); + +		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, +				     &mgmt_err); +		return; +	} + +	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); + +	new_settings(hdev, match.sk); + +	if (match.sk) +		sock_put(match.sk); + +	/* Make sure the controller has a good default for +	 * advertising data. Restrict the update to when LE +	 * has actually been enabled. During power on, the +	 * update in powered_update_hci will take care of it. 
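
The scan byte handled in set_connectable() above (and in set_bredr_scan() later in this patch) is the HCI Write Scan Enable bitmask: bit 0 enables inquiry scan (discoverable over BR/EDR) and bit 1 enables page scan (connectable), with 0x00 meaning no scans at all, as used by the power-off cleanup. A sketch of deriving that byte from the two host flags; the names are local to the sketch but match the SCAN_INQUIRY/SCAN_PAGE semantics used here.

#include <stdint.h>
#include <stdio.h>

#define SCAN_INQUIRY	0x01	/* inquiry scan: discoverable */
#define SCAN_PAGE	0x02	/* page scan: connectable */

/* Same derivation as set_bredr_scan(): connectable turns on page scan,
 * discoverable additionally turns on inquiry scan. */
static uint8_t scan_enable(int connectable, int discoverable)
{
	uint8_t scan = 0;

	if (connectable)
		scan |= SCAN_PAGE;
	if (discoverable)
		scan |= SCAN_INQUIRY;

	return scan;
}

int main(void)
{
	printf("0x%02x\n", scan_enable(1, 1));	/* 0x03: page + inquiry scan */
	printf("0x%02x\n", scan_enable(1, 0));	/* 0x02: connectable only */
	printf("0x%02x\n", scan_enable(0, 0));	/* 0x00: no scans (power off) */
	return 0;
}
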
+	 */ +	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { +		struct hci_request req; + +		hci_dev_lock(hdev); + +		hci_req_init(&req, hdev); +		update_adv_data(&req); +		update_scan_rsp_data(&req); +		hci_req_run(&req, NULL); + +		hci_dev_unlock(hdev); +	}  }  static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) @@ -1326,6 +1982,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  	struct mgmt_mode *cp = data;  	struct hci_cp_write_le_host_supported hci_cp;  	struct pending_cmd *cmd; +	struct hci_request req;  	int err;  	u8 val, enabled; @@ -1340,7 +1997,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  				  MGMT_STATUS_INVALID_PARAMS);  	/* LE-only devices do not allow toggling LE on/off */ -	if (!lmp_bredr_capable(hdev)) +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))  		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,  				  MGMT_STATUS_REJECTED); @@ -1357,6 +2014,11 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  			changed = true;  		} +		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { +			clear_bit(HCI_ADVERTISING, &hdev->dev_flags); +			changed = true; +		} +  		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);  		if (err < 0)  			goto unlock; @@ -1367,7 +2029,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  		goto unlock;  	} -	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) { +	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) || +	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {  		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,  				 MGMT_STATUS_BUSY);  		goto unlock; @@ -1379,15 +2042,22 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  		goto unlock;  	} +	hci_req_init(&req, hdev); +  	memset(&hci_cp, 0, sizeof(hci_cp));  	if (val) {  		hci_cp.le = val;  		hci_cp.simul = lmp_le_br_capable(hdev); +	} else { +		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) +			disable_advertising(&req);  	} -	err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), -			   &hci_cp); +	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), +		    &hci_cp); + +	err = hci_req_run(&req, le_enable_complete);  	if (err < 0)  		mgmt_pending_remove(cmd); @@ -1564,7 +2234,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,  	}  	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { -		err = hci_uuids_clear(hdev); +		hci_uuids_clear(hdev);  		if (enable_service_cache(hdev)) {  			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, @@ -1704,15 +2374,22 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,  {  	struct mgmt_cp_load_link_keys *cp = data;  	u16 key_count, expected_len; +	bool changed;  	int i; +	BT_DBG("request for %s", hdev->name); + +	if (!lmp_bredr_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, +				  MGMT_STATUS_NOT_SUPPORTED); +  	key_count = __le16_to_cpu(cp->key_count);  	expected_len = sizeof(*cp) + key_count *  					sizeof(struct mgmt_link_key_info);  	if (expected_len != len) {  		BT_ERR("load_link_keys: expected %u bytes, got %u bytes", -		       len, expected_len); +		       expected_len, len);  		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,  				  MGMT_STATUS_INVALID_PARAMS);  	} @@ -1727,7 +2404,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,  	for (i = 0; i < key_count; i++) {  		struct mgmt_link_key_info *key = &cp->keys[i]; -		
if (key->addr.type != BDADDR_BREDR) +		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)  			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,  					  MGMT_STATUS_INVALID_PARAMS);  	} @@ -1737,9 +2414,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,  	hci_link_keys_clear(hdev);  	if (cp->debug_keys) -		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); +		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);  	else -		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); +		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); + +	if (changed) +		new_settings(hdev, NULL);  	for (i = 0; i < key_count; i++) {  		struct mgmt_link_key_info *key = &cp->keys[i]; @@ -1799,10 +2479,22 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,  		goto unlock;  	} -	if (cp->addr.type == BDADDR_BREDR) +	if (cp->addr.type == BDADDR_BREDR) {  		err = hci_remove_link_key(hdev, &cp->addr.bdaddr); -	else -		err = hci_remove_ltk(hdev, &cp->addr.bdaddr); +	} else { +		u8 addr_type; + +		if (cp->addr.type == BDADDR_LE_PUBLIC) +			addr_type = ADDR_LE_DEV_PUBLIC; +		else +			addr_type = ADDR_LE_DEV_RANDOM; + +		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); + +		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); + +		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); +	}  	if (err < 0) {  		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, @@ -2126,6 +2818,16 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)  	mgmt_pending_remove(cmd);  } +void mgmt_smp_complete(struct hci_conn *conn, bool complete) +{ +	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED; +	struct pending_cmd *cmd; + +	cmd = find_pairing(conn); +	if (cmd) +		pairing_complete(cmd, status); +} +  static void pairing_complete_cb(struct hci_conn *conn, u8 status)  {  	struct pending_cmd *cmd; @@ -2139,7 +2841,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)  		pairing_complete(cmd, mgmt_status(status));  } -static void le_connect_complete_cb(struct hci_conn *conn, u8 status) +static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)  {  	struct pending_cmd *cmd; @@ -2185,17 +2887,24 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,  	}  	sec_level = BT_SECURITY_MEDIUM; -	if (cp->io_cap == 0x03) -		auth_type = HCI_AT_DEDICATED_BONDING; -	else -		auth_type = HCI_AT_DEDICATED_BONDING_MITM; +	auth_type = HCI_AT_DEDICATED_BONDING; -	if (cp->addr.type == BDADDR_BREDR) -		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, -				   cp->addr.type, sec_level, auth_type); -	else -		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, -				   cp->addr.type, sec_level, auth_type); +	if (cp->addr.type == BDADDR_BREDR) { +		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, +				       auth_type); +	} else { +		u8 addr_type; + +		/* Convert from L2CAP channel address type to HCI address type +		 */ +		if (cp->addr.type == BDADDR_LE_PUBLIC) +			addr_type = ADDR_LE_DEV_PUBLIC; +		else +			addr_type = ADDR_LE_DEV_RANDOM; + +		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type, +				      sec_level, auth_type); +	}  	if (IS_ERR(conn)) {  		int status; @@ -2226,13 +2935,16 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,  	}  	/* For LE, just connecting isn't a proof that the pairing finished */ -	if (cp->addr.type == BDADDR_BREDR) +	if (cp->addr.type == BDADDR_BREDR) {  		conn->connect_cfm_cb = pairing_complete_cb; -	else -		conn->connect_cfm_cb = 
le_connect_complete_cb; +		conn->security_cfm_cb = pairing_complete_cb; +		conn->disconn_cfm_cb = pairing_complete_cb; +	} else { +		conn->connect_cfm_cb = le_pairing_complete_cb; +		conn->security_cfm_cb = le_pairing_complete_cb; +		conn->disconn_cfm_cb = le_pairing_complete_cb; +	} -	conn->security_cfm_cb = pairing_complete_cb; -	conn->disconn_cfm_cb = pairing_complete_cb;  	conn->io_capability = cp->io_cap;  	cmd->user_data = conn; @@ -2319,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,  	}  	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { -		/* Continue with pairing via SMP */ +		/* Continue with pairing via SMP. The hdev lock must be +		 * released as SMP may try to recquire it for crypto +		 * purposes. +		 */ +		hci_dev_unlock(hdev);  		err = smp_user_confirm_reply(conn, mgmt_op, passkey); +		hci_dev_lock(hdev);  		if (!err)  			err = cmd_complete(sk, hdev->id, mgmt_op, @@ -2515,8 +3232,11 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,  		update_eir(&req);  	} +	/* The name is stored in the scan response data and so +	 * no need to udpate the advertising data here. +	 */  	if (lmp_le_capable(hdev)) -		hci_update_ad(&req); +		update_scan_rsp_data(&req);  	err = hci_req_run(&req, set_name_complete);  	if (err < 0) @@ -2561,7 +3281,12 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,  		goto unlock;  	} -	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); +	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) +		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA, +				   0, NULL); +	else +		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); +  	if (err < 0)  		mgmt_pending_remove(cmd); @@ -2573,23 +3298,46 @@ unlock:  static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,  			       void *data, u16 len)  { -	struct mgmt_cp_add_remote_oob_data *cp = data; -	u8 status;  	int err;  	BT_DBG("%s ", hdev->name);  	hci_dev_lock(hdev); -	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash, -				      cp->randomizer); -	if (err < 0) -		status = MGMT_STATUS_FAILED; -	else -		status = MGMT_STATUS_SUCCESS; +	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) { +		struct mgmt_cp_add_remote_oob_data *cp = data; +		u8 status; -	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, -			   &cp->addr, sizeof(cp->addr)); +		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, +					      cp->hash, cp->randomizer); +		if (err < 0) +			status = MGMT_STATUS_FAILED; +		else +			status = MGMT_STATUS_SUCCESS; + +		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, +				   status, &cp->addr, sizeof(cp->addr)); +	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { +		struct mgmt_cp_add_remote_oob_ext_data *cp = data; +		u8 status; + +		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr, +						  cp->hash192, +						  cp->randomizer192, +						  cp->hash256, +						  cp->randomizer256); +		if (err < 0) +			status = MGMT_STATUS_FAILED; +		else +			status = MGMT_STATUS_SUCCESS; + +		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, +				   status, &cp->addr, sizeof(cp->addr)); +	} else { +		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len); +		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, +				 MGMT_STATUS_INVALID_PARAMS); +	}  	hci_dev_unlock(hdev);  	return err; @@ -2642,6 +3390,8 @@ static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)  static void 
start_discovery_complete(struct hci_dev *hdev, u8 status)  { +	unsigned long timeout = 0; +  	BT_DBG("status %d", status);  	if (status) { @@ -2657,13 +3407,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)  	switch (hdev->discovery.type) {  	case DISCOV_TYPE_LE: -		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, -				   DISCOV_LE_TIMEOUT); +		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);  		break;  	case DISCOV_TYPE_INTERLEAVED: -		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, -				   DISCOV_INTERLEAVED_TIMEOUT); +		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);  		break;  	case DISCOV_TYPE_BREDR: @@ -2672,6 +3420,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)  	default:  		BT_ERR("Invalid discovery type %d", hdev->discovery.type);  	} + +	if (!timeout) +		return; + +	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);  }  static int start_discovery(struct sock *sk, struct hci_dev *hdev, @@ -2685,6 +3438,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,  	struct hci_request req;  	/* General inquiry access code (GIAC) */  	u8 lap[3] = { 0x33, 0x8b, 0x9e }; +	u8 status, own_addr_type;  	int err;  	BT_DBG("%s", hdev->name); @@ -2721,9 +3475,10 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,  	switch (hdev->discovery.type) {  	case DISCOV_TYPE_BREDR: -		if (!lmp_bredr_capable(hdev)) { +		status = mgmt_bredr_support(hdev); +		if (status) {  			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, -					 MGMT_STATUS_NOT_SUPPORTED); +					 status);  			mgmt_pending_remove(cmd);  			goto failed;  		} @@ -2745,39 +3500,54 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,  	case DISCOV_TYPE_LE:  	case DISCOV_TYPE_INTERLEAVED: -		if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { +		status = mgmt_le_support(hdev); +		if (status) {  			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, -					 MGMT_STATUS_NOT_SUPPORTED); +					 status);  			mgmt_pending_remove(cmd);  			goto failed;  		}  		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && -		    !lmp_bredr_capable(hdev)) { +		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {  			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,  					 MGMT_STATUS_NOT_SUPPORTED);  			mgmt_pending_remove(cmd);  			goto failed;  		} -		if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) { +		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {  			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,  					 MGMT_STATUS_REJECTED);  			mgmt_pending_remove(cmd);  			goto failed;  		} -		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { +		/* If controller is scanning, it means the background scanning +		 * is running. Thus, we should temporarily stop it in order to +		 * set the discovery scanning parameters. +		 */ +		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) +			hci_req_add_le_scan_disable(&req); + +		memset(¶m_cp, 0, sizeof(param_cp)); + +		/* All active scans will be done with either a resolvable +		 * private address (when privacy feature has been enabled) +		 * or unresolvable private address. 
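
The reason a non-resolvable private address is acceptable here is encoded in the address itself: the Core Specification distinguishes the three kinds of LE random address by the two most significant bits of the most significant byte, 11 for static, 01 for resolvable private (RPA) and 00 for non-resolvable private, which is the same b[5] & 0xc0 test this patch applies to static addresses and IRK entries. A self-contained sketch of the classification; the function name and sample addresses are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* addr[] is in the little-endian byte order used by bdaddr_t, so the most
 * significant byte is addr[5]. */
static const char *random_addr_kind(const uint8_t addr[6])
{
	switch (addr[5] & 0xc0) {
	case 0xc0:
		return "static random";
	case 0x40:
		return "resolvable private (RPA)";
	case 0x00:
		return "non-resolvable private (NRPA)";
	default:
		return "reserved (0b10 prefix)";
	}
}

int main(void)
{
	uint8_t static_addr[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0xc1 };
	uint8_t nrpa[6]        = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x3f };

	printf("%s\n", random_addr_kind(static_addr));	/* static random */
	printf("%s\n", random_addr_kind(nrpa));		/* non-resolvable private */
	return 0;
}
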
+		 */ +		err = hci_update_random_address(&req, true, &own_addr_type); +		if (err < 0) {  			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, -					 MGMT_STATUS_BUSY); +					 MGMT_STATUS_FAILED);  			mgmt_pending_remove(cmd);  			goto failed;  		} -		memset(¶m_cp, 0, sizeof(param_cp));  		param_cp.type = LE_SCAN_ACTIVE;  		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);  		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); +		param_cp.own_address_type = own_addr_type;  		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),  			    ¶m_cp); @@ -2844,10 +3614,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,  {  	struct mgmt_cp_stop_discovery *mgmt_cp = data;  	struct pending_cmd *cmd; -	struct hci_cp_remote_name_req_cancel cp; -	struct inquiry_entry *e;  	struct hci_request req; -	struct hci_cp_le_set_scan_enable enable_cp;  	int err;  	BT_DBG("%s", hdev->name); @@ -2876,55 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,  	hci_req_init(&req, hdev); -	switch (hdev->discovery.state) { -	case DISCOVERY_FINDING: -		if (test_bit(HCI_INQUIRY, &hdev->flags)) { -			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL); -		} else { -			cancel_delayed_work(&hdev->le_scan_disable); +	hci_stop_discovery(&req); -			memset(&enable_cp, 0, sizeof(enable_cp)); -			enable_cp.enable = LE_SCAN_DISABLE; -			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, -				    sizeof(enable_cp), &enable_cp); -		} - -		break; - -	case DISCOVERY_RESOLVING: -		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, -						     NAME_PENDING); -		if (!e) { -			mgmt_pending_remove(cmd); -			err = cmd_complete(sk, hdev->id, -					   MGMT_OP_STOP_DISCOVERY, 0, -					   &mgmt_cp->type, -					   sizeof(mgmt_cp->type)); -			hci_discovery_set_state(hdev, DISCOVERY_STOPPED); -			goto unlock; -		} - -		bacpy(&cp.bdaddr, &e->data.bdaddr); -		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), -			    &cp); - -		break; - -	default: -		BT_DBG("unknown discovery state %u", hdev->discovery.state); - -		mgmt_pending_remove(cmd); -		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, -				   MGMT_STATUS_FAILED, &mgmt_cp->type, -				   sizeof(mgmt_cp->type)); +	err = hci_req_run(&req, stop_discovery_complete); +	if (!err) { +		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);  		goto unlock;  	} -	err = hci_req_run(&req, stop_discovery_complete); -	if (err < 0) -		mgmt_pending_remove(cmd); -	else -		hci_discovery_set_state(hdev, DISCOVERY_STOPPING); +	mgmt_pending_remove(cmd); + +	/* If no HCI commands were sent we're done */ +	if (err == -ENODATA) { +		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, +				   &mgmt_cp->type, sizeof(mgmt_cp->type)); +		hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +	}  unlock:  	hci_dev_unlock(hdev); @@ -2943,15 +3677,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,  	hci_dev_lock(hdev);  	if (!hci_discovery_active(hdev)) { -		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME, -				 MGMT_STATUS_FAILED); +		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, +				   MGMT_STATUS_FAILED, &cp->addr, +				   sizeof(cp->addr));  		goto failed;  	}  	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);  	if (!e) { -		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME, -				 MGMT_STATUS_INVALID_PARAMS); +		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, +				   MGMT_STATUS_INVALID_PARAMS, &cp->addr, +				   sizeof(cp->addr));  		goto failed;  	} @@ -3065,6 
+3801,201 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,  	return err;  } +static void set_advertising_complete(struct hci_dev *hdev, u8 status) +{ +	struct cmd_lookup match = { NULL, hdev }; + +	if (status) { +		u8 mgmt_err = mgmt_status(status); + +		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, +				     cmd_status_rsp, &mgmt_err); +		return; +	} + +	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, +			     &match); + +	new_settings(hdev, match.sk); + +	if (match.sk) +		sock_put(match.sk); +} + +static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, +			   u16 len) +{ +	struct mgmt_mode *cp = data; +	struct pending_cmd *cmd; +	struct hci_request req; +	u8 val, enabled, status; +	int err; + +	BT_DBG("request for %s", hdev->name); + +	status = mgmt_le_support(hdev); +	if (status) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, +				  status); + +	if (cp->val != 0x00 && cp->val != 0x01) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, +				  MGMT_STATUS_INVALID_PARAMS); + +	hci_dev_lock(hdev); + +	val = !!cp->val; +	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags); + +	/* The following conditions are ones which mean that we should +	 * not do any HCI communication but directly send a mgmt +	 * response to user space (after toggling the flag if +	 * necessary). +	 */ +	if (!hdev_is_powered(hdev) || val == enabled || +	    hci_conn_num(hdev, LE_LINK) > 0) { +		bool changed = false; + +		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { +			change_bit(HCI_ADVERTISING, &hdev->dev_flags); +			changed = true; +		} + +		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); +		if (err < 0) +			goto unlock; + +		if (changed) +			err = new_settings(hdev, sk); + +		goto unlock; +	} + +	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) || +	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) { +		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, +				 MGMT_STATUS_BUSY); +		goto unlock; +	} + +	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); +	if (!cmd) { +		err = -ENOMEM; +		goto unlock; +	} + +	hci_req_init(&req, hdev); + +	if (val) +		enable_advertising(&req); +	else +		disable_advertising(&req); + +	err = hci_req_run(&req, set_advertising_complete); +	if (err < 0) +		mgmt_pending_remove(cmd); + +unlock: +	hci_dev_unlock(hdev); +	return err; +} + +static int set_static_address(struct sock *sk, struct hci_dev *hdev, +			      void *data, u16 len) +{ +	struct mgmt_cp_set_static_address *cp = data; +	int err; + +	BT_DBG("%s", hdev->name); + +	if (!lmp_le_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, +				  MGMT_STATUS_NOT_SUPPORTED); + +	if (hdev_is_powered(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, +				  MGMT_STATUS_REJECTED); + +	if (bacmp(&cp->bdaddr, BDADDR_ANY)) { +		if (!bacmp(&cp->bdaddr, BDADDR_NONE)) +			return cmd_status(sk, hdev->id, +					  MGMT_OP_SET_STATIC_ADDRESS, +					  MGMT_STATUS_INVALID_PARAMS); + +		/* Two most significant bits shall be set */ +		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) +			return cmd_status(sk, hdev->id, +					  MGMT_OP_SET_STATIC_ADDRESS, +					  MGMT_STATUS_INVALID_PARAMS); +	} + +	hci_dev_lock(hdev); + +	bacpy(&hdev->static_addr, &cp->bdaddr); + +	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0); + +	hci_dev_unlock(hdev); + +	return err; +} + +static int set_scan_params(struct sock *sk, struct hci_dev *hdev, +			   void *data, u16 len) +{ 
+	struct mgmt_cp_set_scan_params *cp = data; +	__u16 interval, window; +	int err; + +	BT_DBG("%s", hdev->name); + +	if (!lmp_le_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, +				  MGMT_STATUS_NOT_SUPPORTED); + +	interval = __le16_to_cpu(cp->interval); + +	if (interval < 0x0004 || interval > 0x4000) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, +				  MGMT_STATUS_INVALID_PARAMS); + +	window = __le16_to_cpu(cp->window); + +	if (window < 0x0004 || window > 0x4000) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, +				  MGMT_STATUS_INVALID_PARAMS); + +	if (window > interval) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, +				  MGMT_STATUS_INVALID_PARAMS); + +	hci_dev_lock(hdev); + +	hdev->le_scan_interval = interval; +	hdev->le_scan_window = window; + +	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0); + +	/* If background scan is running, restart it so new parameters are +	 * loaded. +	 */ +	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && +	    hdev->discovery.state == DISCOVERY_STOPPED) { +		struct hci_request req; + +		hci_req_init(&req, hdev); + +		hci_req_add_le_scan_disable(&req); +		hci_req_add_le_passive_scan(&req); + +		hci_req_run(&req, NULL); +	} + +	hci_dev_unlock(hdev); + +	return err; +} +  static void fast_connectable_complete(struct hci_dev *hdev, u8 status)  {  	struct pending_cmd *cmd; @@ -3108,7 +4039,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,  	BT_DBG("%s", hdev->name); -	if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2) +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) || +	    hdev->hci_ver < BLUETOOTH_VER_1_2)  		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,  				  MGMT_STATUS_NOT_SUPPORTED); @@ -3162,15 +4094,411 @@ unlock:  	return err;  } +static void set_bredr_scan(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	u8 scan = 0; + +	/* Ensure that fast connectable is disabled. This function will +	 * not do anything if the page scan parameters are already what +	 * they should be. +	 */ +	write_fast_connectable(req, false); + +	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) +		scan |= SCAN_PAGE; +	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) +		scan |= SCAN_INQUIRY; + +	if (scan) +		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +} + +static void set_bredr_complete(struct hci_dev *hdev, u8 status) +{ +	struct pending_cmd *cmd; + +	BT_DBG("status 0x%02x", status); + +	hci_dev_lock(hdev); + +	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev); +	if (!cmd) +		goto unlock; + +	if (status) { +		u8 mgmt_err = mgmt_status(status); + +		/* We need to restore the flag if related HCI commands +		 * failed. 
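
The bounds checked by set_scan_params() above come straight from the LE Set Scan Parameters command: interval and window are counted in 0.625 ms slots, must lie in 0x0004..0x4000, and the window may not exceed the interval. A sketch of the same validation plus the unit conversion; the function names are local to the sketch.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the range checks in set_scan_params(). */
static bool scan_params_valid(uint16_t interval, uint16_t window)
{
	if (interval < 0x0004 || interval > 0x4000)
		return false;
	if (window < 0x0004 || window > 0x4000)
		return false;
	return window <= interval;
}

static double slots_to_ms(uint16_t slots)
{
	return slots * 0.625;	/* one slot is 0.625 ms */
}

int main(void)
{
	uint16_t interval = 0x0060, window = 0x0030;	/* 60 ms / 30 ms */

	if (scan_params_valid(interval, window))
		printf("interval %.1f ms, window %.1f ms\n",
		       slots_to_ms(interval), slots_to_ms(window));
	return 0;
}
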
+		 */ +		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + +		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); +	} else { +		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); +		new_settings(hdev, cmd->sk); +	} + +	mgmt_pending_remove(cmd); + +unlock: +	hci_dev_unlock(hdev); +} + +static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ +	struct mgmt_mode *cp = data; +	struct pending_cmd *cmd; +	struct hci_request req; +	int err; + +	BT_DBG("request for %s", hdev->name); + +	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, +				  MGMT_STATUS_NOT_SUPPORTED); + +	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, +				  MGMT_STATUS_REJECTED); + +	if (cp->val != 0x00 && cp->val != 0x01) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, +				  MGMT_STATUS_INVALID_PARAMS); + +	hci_dev_lock(hdev); + +	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); +		goto unlock; +	} + +	if (!hdev_is_powered(hdev)) { +		if (!cp->val) { +			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); +			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); +			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags); +			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); +			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); +		} + +		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + +		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); +		if (err < 0) +			goto unlock; + +		err = new_settings(hdev, sk); +		goto unlock; +	} + +	/* Reject disabling when powered on */ +	if (!cp->val) { +		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, +				 MGMT_STATUS_REJECTED); +		goto unlock; +	} + +	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) { +		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, +				 MGMT_STATUS_BUSY); +		goto unlock; +	} + +	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len); +	if (!cmd) { +		err = -ENOMEM; +		goto unlock; +	} + +	/* We need to flip the bit already here so that update_adv_data +	 * generates the correct flags. +	 */ +	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + +	hci_req_init(&req, hdev); + +	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) +		set_bredr_scan(&req); + +	/* Since only the advertising data flags will change, there +	 * is no need to update the scan response data. 
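
Advertising data and scan response data are two separate 31-byte buffers, which is why only update_adv_data() is needed here: the Flags and TX-power elements live in the advertising data while the local name is carried in the scan response (see the set_local_name()/update_scan_rsp_data() change earlier in this patch). A sketch of building the name element for a scan response; the names and buffer handling are local to the sketch, and 0x08/0x09 are the standard Shortened/Complete Local Name AD types.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define AD_TYPE_NAME_SHORT	0x08
#define AD_TYPE_NAME_COMPLETE	0x09
#define SCAN_RSP_MAX		31

/* Build the single name element that goes into the scan response buffer.
 * If the name does not fit, it is truncated and tagged with the
 * "shortened" AD type instead of the "complete" one. */
static size_t build_name_scan_rsp(uint8_t *buf, const char *name)
{
	size_t name_len = strlen(name);
	uint8_t type = AD_TYPE_NAME_COMPLETE;

	if (name_len > SCAN_RSP_MAX - 2) {
		name_len = SCAN_RSP_MAX - 2;
		type = AD_TYPE_NAME_SHORT;
	}

	buf[0] = name_len + 1;		/* type byte + name bytes */
	buf[1] = type;
	memcpy(&buf[2], name, name_len);

	return name_len + 2;
}

int main(void)
{
	uint8_t scan_rsp[SCAN_RSP_MAX];
	size_t len = build_name_scan_rsp(scan_rsp, "kernel-test");

	printf("scan rsp element: %zu bytes, type 0x%02x\n", len, scan_rsp[1]);
	return 0;
}
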
+	 */ +	update_adv_data(&req); + +	err = hci_req_run(&req, set_bredr_complete); +	if (err < 0) +		mgmt_pending_remove(cmd); + +unlock: +	hci_dev_unlock(hdev); +	return err; +} + +static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, +			   void *data, u16 len) +{ +	struct mgmt_mode *cp = data; +	struct pending_cmd *cmd; +	u8 val, status; +	int err; + +	BT_DBG("request for %s", hdev->name); + +	status = mgmt_bredr_support(hdev); +	if (status) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, +				  status); + +	if (!lmp_sc_capable(hdev) && +	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, +				  MGMT_STATUS_NOT_SUPPORTED); + +	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, +				  MGMT_STATUS_INVALID_PARAMS); + +	hci_dev_lock(hdev); + +	if (!hdev_is_powered(hdev)) { +		bool changed; + +		if (cp->val) { +			changed = !test_and_set_bit(HCI_SC_ENABLED, +						    &hdev->dev_flags); +			if (cp->val == 0x02) +				set_bit(HCI_SC_ONLY, &hdev->dev_flags); +			else +				clear_bit(HCI_SC_ONLY, &hdev->dev_flags); +		} else { +			changed = test_and_clear_bit(HCI_SC_ENABLED, +						     &hdev->dev_flags); +			clear_bit(HCI_SC_ONLY, &hdev->dev_flags); +		} + +		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); +		if (err < 0) +			goto failed; + +		if (changed) +			err = new_settings(hdev, sk); + +		goto failed; +	} + +	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { +		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, +				 MGMT_STATUS_BUSY); +		goto failed; +	} + +	val = !!cp->val; + +	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && +	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) { +		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); +		goto failed; +	} + +	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); +	if (!cmd) { +		err = -ENOMEM; +		goto failed; +	} + +	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val); +	if (err < 0) { +		mgmt_pending_remove(cmd); +		goto failed; +	} + +	if (cp->val == 0x02) +		set_bit(HCI_SC_ONLY, &hdev->dev_flags); +	else +		clear_bit(HCI_SC_ONLY, &hdev->dev_flags); + +failed: +	hci_dev_unlock(hdev); +	return err; +} + +static int set_debug_keys(struct sock *sk, struct hci_dev *hdev, +			  void *data, u16 len) +{ +	struct mgmt_mode *cp = data; +	bool changed; +	int err; + +	BT_DBG("request for %s", hdev->name); + +	if (cp->val != 0x00 && cp->val != 0x01) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, +				  MGMT_STATUS_INVALID_PARAMS); + +	hci_dev_lock(hdev); + +	if (cp->val) +		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); +	else +		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); + +	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); +	if (err < 0) +		goto unlock; + +	if (changed) +		err = new_settings(hdev, sk); + +unlock: +	hci_dev_unlock(hdev); +	return err; +} + +static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, +		       u16 len) +{ +	struct mgmt_cp_set_privacy *cp = cp_data; +	bool changed; +	int err; + +	BT_DBG("request for %s", hdev->name); + +	if (!lmp_le_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, +				  MGMT_STATUS_NOT_SUPPORTED); + +	if (cp->privacy != 0x00 && cp->privacy != 0x01) +		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, +				  MGMT_STATUS_INVALID_PARAMS); + +	if (hdev_is_powered(hdev)) +		return 
cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, +				  MGMT_STATUS_REJECTED); + +	hci_dev_lock(hdev); + +	/* If user space supports this command it is also expected to +	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. +	 */ +	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); + +	if (cp->privacy) { +		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags); +		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); +		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); +	} else { +		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags); +		memset(hdev->irk, 0, sizeof(hdev->irk)); +		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); +	} + +	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); +	if (err < 0) +		goto unlock; + +	if (changed) +		err = new_settings(hdev, sk); + +unlock: +	hci_dev_unlock(hdev); +	return err; +} + +static bool irk_is_valid(struct mgmt_irk_info *irk) +{ +	switch (irk->addr.type) { +	case BDADDR_LE_PUBLIC: +		return true; + +	case BDADDR_LE_RANDOM: +		/* Two most significant bits shall be set */ +		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0) +			return false; +		return true; +	} + +	return false; +} + +static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, +		     u16 len) +{ +	struct mgmt_cp_load_irks *cp = cp_data; +	u16 irk_count, expected_len; +	int i, err; + +	BT_DBG("request for %s", hdev->name); + +	if (!lmp_le_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, +				  MGMT_STATUS_NOT_SUPPORTED); + +	irk_count = __le16_to_cpu(cp->irk_count); + +	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info); +	if (expected_len != len) { +		BT_ERR("load_irks: expected %u bytes, got %u bytes", +		       expected_len, len); +		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, +				  MGMT_STATUS_INVALID_PARAMS); +	} + +	BT_DBG("%s irk_count %u", hdev->name, irk_count); + +	for (i = 0; i < irk_count; i++) { +		struct mgmt_irk_info *key = &cp->irks[i]; + +		if (!irk_is_valid(key)) +			return cmd_status(sk, hdev->id, +					  MGMT_OP_LOAD_IRKS, +					  MGMT_STATUS_INVALID_PARAMS); +	} + +	hci_dev_lock(hdev); + +	hci_smp_irks_clear(hdev); + +	for (i = 0; i < irk_count; i++) { +		struct mgmt_irk_info *irk = &cp->irks[i]; +		u8 addr_type; + +		if (irk->addr.type == BDADDR_LE_PUBLIC) +			addr_type = ADDR_LE_DEV_PUBLIC; +		else +			addr_type = ADDR_LE_DEV_RANDOM; + +		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val, +			    BDADDR_ANY); +	} + +	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); + +	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); + +	hci_dev_unlock(hdev); + +	return err; +} +  static bool ltk_is_valid(struct mgmt_ltk_info *key)  { -	if (key->authenticated != 0x00 && key->authenticated != 0x01) -		return false;  	if (key->master != 0x00 && key->master != 0x01)  		return false; -	if (!bdaddr_type_is_le(key->addr.type)) -		return false; -	return true; + +	switch (key->addr.type) { +	case BDADDR_LE_PUBLIC: +		return true; + +	case BDADDR_LE_RANDOM: +		/* Two most significant bits shall be set */ +		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0) +			return false; +		return true; +	} + +	return false;  }  static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, @@ -3180,13 +4508,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,  	u16 key_count, expected_len;  	int i, err; +	BT_DBG("request for %s", hdev->name); + +	if (!lmp_le_capable(hdev)) +		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, +				  MGMT_STATUS_NOT_SUPPORTED); +  	key_count = 
__le16_to_cpu(cp->key_count);  	expected_len = sizeof(*cp) + key_count *  					sizeof(struct mgmt_ltk_info);  	if (expected_len != len) {  		BT_ERR("load_keys: expected %u bytes, got %u bytes", -		       len, expected_len); +		       expected_len, len);  		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,  				  MGMT_STATUS_INVALID_PARAMS);  	} @@ -3208,17 +4542,32 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,  	for (i = 0; i < key_count; i++) {  		struct mgmt_ltk_info *key = &cp->keys[i]; -		u8 type; +		u8 type, addr_type, authenticated; + +		if (key->addr.type == BDADDR_LE_PUBLIC) +			addr_type = ADDR_LE_DEV_PUBLIC; +		else +			addr_type = ADDR_LE_DEV_RANDOM;  		if (key->master)  			type = HCI_SMP_LTK;  		else  			type = HCI_SMP_LTK_SLAVE; -		hci_add_ltk(hdev, &key->addr.bdaddr, -			    bdaddr_to_le(key->addr.type), -			    type, 0, key->authenticated, key->val, -			    key->enc_size, key->ediv, key->rand); +		switch (key->type) { +		case MGMT_LTK_UNAUTHENTICATED: +			authenticated = 0x00; +			break; +		case MGMT_LTK_AUTHENTICATED: +			authenticated = 0x01; +			break; +		default: +			continue; +		} + +		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type, +			    authenticated, key->val, key->enc_size, key->ediv, +			    key->rand);  	}  	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, @@ -3229,6 +4578,218 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,  	return err;  } +struct cmd_conn_lookup { +	struct hci_conn *conn; +	bool valid_tx_power; +	u8 mgmt_status; +}; + +static void get_conn_info_complete(struct pending_cmd *cmd, void *data) +{ +	struct cmd_conn_lookup *match = data; +	struct mgmt_cp_get_conn_info *cp; +	struct mgmt_rp_get_conn_info rp; +	struct hci_conn *conn = cmd->user_data; + +	if (conn != match->conn) +		return; + +	cp = (struct mgmt_cp_get_conn_info *) cmd->param; + +	memset(&rp, 0, sizeof(rp)); +	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); +	rp.addr.type = cp->addr.type; + +	if (!match->mgmt_status) { +		rp.rssi = conn->rssi; + +		if (match->valid_tx_power) { +			rp.tx_power = conn->tx_power; +			rp.max_tx_power = conn->max_tx_power; +		} else { +			rp.tx_power = HCI_TX_POWER_INVALID; +			rp.max_tx_power = HCI_TX_POWER_INVALID; +		} +	} + +	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, +		     match->mgmt_status, &rp, sizeof(rp)); + +	hci_conn_drop(conn); + +	mgmt_pending_remove(cmd); +} + +static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status) +{ +	struct hci_cp_read_rssi *cp; +	struct hci_conn *conn; +	struct cmd_conn_lookup match; +	u16 handle; + +	BT_DBG("status 0x%02x", status); + +	hci_dev_lock(hdev); + +	/* TX power data is valid in case request completed successfully, +	 * otherwise we assume it's not valid. At the moment we assume that +	 * either both or none of current and max values are valid to keep code +	 * simple. +	 */ +	match.valid_tx_power = !status; + +	/* Commands sent in request are either Read RSSI or Read Transmit Power +	 * Level so we check which one was last sent to retrieve connection +	 * handle.  Both commands have handle as first parameter so it's safe to +	 * cast data on the same command struct. +	 * +	 * First command sent is always Read RSSI and we fail only if it fails. +	 * In other case we simply override error to indicate success as we +	 * already remembered if TX power value is actually valid. 
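
The cast described in the comment above is safe because both command parameter blocks begin with the 16-bit connection handle. A minimal illustration of that layout assumption; the struct names are local to the sketch, while the field order follows the HCI Read RSSI and Read Transmit Power Level command definitions.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct read_rssi_cp {
	uint16_t handle;
} __attribute__((packed));

struct read_tx_power_cp {
	uint16_t handle;
	uint8_t  type;		/* 0x00 current, 0x01 maximum */
} __attribute__((packed));

int main(void)
{
	/* The handle sits at offset 0 in both layouts, so reading it through
	 * either struct type yields the same value regardless of which
	 * command was the last one sent. */
	printf("%zu %zu\n",
	       offsetof(struct read_rssi_cp, handle),
	       offsetof(struct read_tx_power_cp, handle));	/* 0 0 */
	return 0;
}
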
+	 */ +	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI); +	if (!cp) { +		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); +		status = 0; +	} + +	if (!cp) { +		BT_ERR("invalid sent_cmd in response"); +		goto unlock; +	} + +	handle = __le16_to_cpu(cp->handle); +	conn = hci_conn_hash_lookup_handle(hdev, handle); +	if (!conn) { +		BT_ERR("unknown handle (%d) in response", handle); +		goto unlock; +	} + +	match.conn = conn; +	match.mgmt_status = mgmt_status(status); + +	/* Cache refresh is complete, now reply for mgmt request for given +	 * connection only. +	 */ +	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev, +			     get_conn_info_complete, &match); + +unlock: +	hci_dev_unlock(hdev); +} + +static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, +			 u16 len) +{ +	struct mgmt_cp_get_conn_info *cp = data; +	struct mgmt_rp_get_conn_info rp; +	struct hci_conn *conn; +	unsigned long conn_info_age; +	int err = 0; + +	BT_DBG("%s", hdev->name); + +	memset(&rp, 0, sizeof(rp)); +	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); +	rp.addr.type = cp->addr.type; + +	if (!bdaddr_type_is_valid(cp->addr.type)) +		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, +				    MGMT_STATUS_INVALID_PARAMS, +				    &rp, sizeof(rp)); + +	hci_dev_lock(hdev); + +	if (!hdev_is_powered(hdev)) { +		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, +				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); +		goto unlock; +	} + +	if (cp->addr.type == BDADDR_BREDR) +		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, +					       &cp->addr.bdaddr); +	else +		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); + +	if (!conn || conn->state != BT_CONNECTED) { +		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, +				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); +		goto unlock; +	} + +	/* To avoid client trying to guess when to poll again for information we +	 * calculate conn info age as random value between min/max set in hdev. +	 */ +	conn_info_age = hdev->conn_info_min_age + +			prandom_u32_max(hdev->conn_info_max_age - +					hdev->conn_info_min_age); + +	/* Query controller to refresh cached values if they are too old or were +	 * never read. +	 */ +	if (time_after(jiffies, conn->conn_info_timestamp + +		       msecs_to_jiffies(conn_info_age)) || +	    !conn->conn_info_timestamp) { +		struct hci_request req; +		struct hci_cp_read_tx_power req_txp_cp; +		struct hci_cp_read_rssi req_rssi_cp; +		struct pending_cmd *cmd; + +		hci_req_init(&req, hdev); +		req_rssi_cp.handle = cpu_to_le16(conn->handle); +		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp), +			    &req_rssi_cp); + +		/* For LE links TX power does not change thus we don't need to +		 * query for it once value is known. 
+		 */ +		if (!bdaddr_type_is_le(cp->addr.type) || +		    conn->tx_power == HCI_TX_POWER_INVALID) { +			req_txp_cp.handle = cpu_to_le16(conn->handle); +			req_txp_cp.type = 0x00; +			hci_req_add(&req, HCI_OP_READ_TX_POWER, +				    sizeof(req_txp_cp), &req_txp_cp); +		} + +		/* Max TX power needs to be read only once per connection */ +		if (conn->max_tx_power == HCI_TX_POWER_INVALID) { +			req_txp_cp.handle = cpu_to_le16(conn->handle); +			req_txp_cp.type = 0x01; +			hci_req_add(&req, HCI_OP_READ_TX_POWER, +				    sizeof(req_txp_cp), &req_txp_cp); +		} + +		err = hci_req_run(&req, conn_info_refresh_complete); +		if (err < 0) +			goto unlock; + +		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev, +				       data, len); +		if (!cmd) { +			err = -ENOMEM; +			goto unlock; +		} + +		hci_conn_hold(conn); +		cmd->user_data = conn; + +		conn->conn_info_timestamp = jiffies; +	} else { +		/* Cache is valid, just reply with values cached in hci_conn */ +		rp.rssi = conn->rssi; +		rp.tx_power = conn->tx_power; +		rp.max_tx_power = conn->max_tx_power; + +		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, +				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); +	} + +unlock: +	hci_dev_unlock(hdev); +	return err; +} +  static const struct mgmt_handler {  	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,  		     u16 data_len); @@ -3268,7 +4829,7 @@ static const struct mgmt_handler {  	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },  	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },  	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE }, -	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE }, +	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },  	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },  	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },  	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE }, @@ -3276,6 +4837,15 @@ static const struct mgmt_handler {  	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },  	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },  	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE }, +	{ set_advertising,        false, MGMT_SETTING_SIZE }, +	{ set_bredr,              false, MGMT_SETTING_SIZE }, +	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE }, +	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE }, +	{ set_secure_conn,        false, MGMT_SETTING_SIZE }, +	{ set_debug_keys,         false, MGMT_SETTING_SIZE }, +	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE }, +	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE }, +	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },  }; @@ -3320,6 +4890,13 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)  					 MGMT_STATUS_INVALID_INDEX);  			goto done;  		} + +		if (test_bit(HCI_SETUP, &hdev->dev_flags) || +		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +			err = cmd_status(sk, index, opcode, +					 MGMT_STATUS_INVALID_INDEX); +			goto done; +		}  	}  	if (opcode >= ARRAY_SIZE(mgmt_handlers) || @@ -3365,74 +4942,35 @@ done:  	return err;  } -static void cmd_status_rsp(struct pending_cmd *cmd, void *data) +void mgmt_index_added(struct hci_dev *hdev)  { -	u8 *status = data; - -	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); -	mgmt_pending_remove(cmd); -} - -int mgmt_index_added(struct hci_dev *hdev) -{ -	if (!mgmt_valid_hdev(hdev)) -		return -ENOTSUPP; +	if (hdev->dev_type != HCI_BREDR) +		return; -	
return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); +	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);  } -int mgmt_index_removed(struct hci_dev *hdev) +void mgmt_index_removed(struct hci_dev *hdev)  {  	u8 status = MGMT_STATUS_INVALID_INDEX; -	if (!mgmt_valid_hdev(hdev)) -		return -ENOTSUPP; +	if (hdev->dev_type != HCI_BREDR) +		return;  	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); -	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); +	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);  } -struct cmd_lookup { -	struct sock *sk; -	struct hci_dev *hdev; -	u8 mgmt_status; -}; - -static void settings_rsp(struct pending_cmd *cmd, void *data) +/* This function requires the caller holds hdev->lock */ +static void restart_le_auto_conns(struct hci_dev *hdev)  { -	struct cmd_lookup *match = data; - -	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); - -	list_del(&cmd->list); +	struct hci_conn_params *p; -	if (match->sk == NULL) { -		match->sk = cmd->sk; -		sock_hold(match->sk); +	list_for_each_entry(p, &hdev->le_conn_params, list) { +		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) +			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);  	} - -	mgmt_pending_free(cmd); -} - -static void set_bredr_scan(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	u8 scan = 0; - -	/* Ensure that fast connectable is disabled. This function will -	 * not do anything if the page scan parameters are already what -	 * they should be. -	 */ -	write_fast_connectable(req, false); - -	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) -		scan |= SCAN_PAGE; -	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) -		scan |= SCAN_INQUIRY; - -	if (scan) -		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);  }  static void powered_complete(struct hci_dev *hdev, u8 status) @@ -3443,6 +4981,8 @@ static void powered_complete(struct hci_dev *hdev, u8 status)  	hci_dev_lock(hdev); +	restart_le_auto_conns(hdev); +  	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);  	new_settings(hdev, match.sk); @@ -3483,13 +5023,28 @@ static int powered_update_hci(struct hci_dev *hdev)  				    sizeof(cp), &cp);  	} +	if (lmp_le_capable(hdev)) { +		/* Make sure the controller has a good default for +		 * advertising data. This also applies to the case +		 * where BR/EDR was toggled during the AUTO_OFF phase. 
+		 */ +		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { +			update_adv_data(&req); +			update_scan_rsp_data(&req); +		} + +		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) +			enable_advertising(&req); +	} +  	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);  	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))  		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,  			    sizeof(link_sec), &link_sec);  	if (lmp_bredr_capable(hdev)) { -		set_bredr_scan(&req); +		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) +			set_bredr_scan(&req);  		update_class(&req);  		update_name(&req);  		update_eir(&req); @@ -3533,76 +5088,130 @@ new_settings:  	return err;  } -int mgmt_set_powered_failed(struct hci_dev *hdev, int err) +void mgmt_set_powered_failed(struct hci_dev *hdev, int err)  {  	struct pending_cmd *cmd;  	u8 status;  	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);  	if (!cmd) -		return -ENOENT; +		return;  	if (err == -ERFKILL)  		status = MGMT_STATUS_RFKILLED;  	else  		status = MGMT_STATUS_FAILED; -	err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); +	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);  	mgmt_pending_remove(cmd); +} -	return err; +void mgmt_discoverable_timeout(struct hci_dev *hdev) +{ +	struct hci_request req; + +	hci_dev_lock(hdev); + +	/* When discoverable timeout triggers, then just make sure +	 * the limited discoverable flag is cleared. Even in the case +	 * of a timeout triggered from general discoverable, it is +	 * safe to unconditionally clear the flag. +	 */ +	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); +	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); + +	hci_req_init(&req, hdev); +	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		u8 scan = SCAN_PAGE; +		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, +			    sizeof(scan), &scan); +	} +	update_class(&req); +	update_adv_data(&req); +	hci_req_run(&req, NULL); + +	hdev->discov_timeout = 0; + +	new_settings(hdev, NULL); + +	hci_dev_unlock(hdev);  } -int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) +void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)  { -	struct cmd_lookup match = { NULL, hdev }; -	bool changed = false; -	int err = 0; +	bool changed; + +	/* Nothing needed here if there's a pending command since that +	 * commands request completion callback takes care of everything +	 * necessary. +	 */ +	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) +		return; + +	/* Powering off may clear the scan mode - don't let that interfere */ +	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) +		return;  	if (discoverable) { -		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) -			changed = true; +		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);  	} else { -		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) -			changed = true; +		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); +		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);  	} -	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, -			     &match); - -	if (changed) -		err = new_settings(hdev, match.sk); +	if (changed) { +		struct hci_request req; -	if (match.sk) -		sock_put(match.sk); +		/* In case this change in discoverable was triggered by +		 * a disabling of connectable there could be a need to +		 * update the advertising flags. 
+		 */ +		hci_req_init(&req, hdev); +		update_adv_data(&req); +		hci_req_run(&req, NULL); -	return err; +		new_settings(hdev, NULL); +	}  } -int mgmt_connectable(struct hci_dev *hdev, u8 connectable) +void mgmt_connectable(struct hci_dev *hdev, u8 connectable)  { -	struct pending_cmd *cmd; -	bool changed = false; -	int err = 0; +	bool changed; -	if (connectable) { -		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags)) -			changed = true; -	} else { -		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags)) -			changed = true; -	} +	/* Nothing needed here if there's a pending command since that +	 * commands request completion callback takes care of everything +	 * necessary. +	 */ +	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) +		return; -	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); +	/* Powering off may clear the scan mode - don't let that interfere */ +	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) +		return; + +	if (connectable) +		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags); +	else +		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);  	if (changed) -		err = new_settings(hdev, cmd ? cmd->sk : NULL); +		new_settings(hdev, NULL); +} -	return err; +void mgmt_advertising(struct hci_dev *hdev, u8 advertising) +{ +	/* Powering off may stop advertising - don't let that interfere */ +	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) +		return; + +	if (advertising) +		set_bit(HCI_ADVERTISING, &hdev->dev_flags); +	else +		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);  } -int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) +void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)  {  	u8 mgmt_err = mgmt_status(status); @@ -3613,12 +5222,10 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)  	if (scan & SCAN_INQUIRY)  		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,  				     cmd_status_rsp, &mgmt_err); - -	return 0;  } -int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, -		      bool persistent) +void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, +		       bool persistent)  {  	struct mgmt_ev_new_link_key ev; @@ -3631,35 +5238,129 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,  	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);  	ev.key.pin_len = key->pin_len; -	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); +	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);  } -int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent) +static u8 mgmt_ltk_type(struct smp_ltk *ltk) +{ +	if (ltk->authenticated) +		return MGMT_LTK_AUTHENTICATED; + +	return MGMT_LTK_UNAUTHENTICATED; +} + +void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)  {  	struct mgmt_ev_new_long_term_key ev;  	memset(&ev, 0, sizeof(ev)); -	ev.store_hint = persistent; +	/* Devices using resolvable or non-resolvable random addresses +	 * without providing an indentity resolving key don't require +	 * to store long term keys. Their addresses will change the +	 * next time around. +	 * +	 * Only when a remote device provides an identity address +	 * make sure the long term key is stored. If the remote +	 * identity is known, the long term keys are internally +	 * mapped to the identity address. So allow static random +	 * and public addresses here. 
+	 */ +	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM && +	    (key->bdaddr.b[5] & 0xc0) != 0xc0) +		ev.store_hint = 0x00; +	else +		ev.store_hint = persistent; +  	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);  	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); -	ev.key.authenticated = key->authenticated; +	ev.key.type = mgmt_ltk_type(key);  	ev.key.enc_size = key->enc_size;  	ev.key.ediv = key->ediv; +	ev.key.rand = key->rand;  	if (key->type == HCI_SMP_LTK)  		ev.key.master = 1; -	memcpy(ev.key.rand, key->rand, sizeof(key->rand));  	memcpy(ev.key.val, key->val, sizeof(key->val)); -	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), -			  NULL); +	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk) +{ +	struct mgmt_ev_new_irk ev; + +	memset(&ev, 0, sizeof(ev)); + +	/* For identity resolving keys from devices that are already +	 * using a public address or static random address, do not +	 * ask for storing this key. The identity resolving key really +	 * is only mandatory for devices using resovlable random +	 * addresses. +	 * +	 * Storing all identity resolving keys has the downside that +	 * they will be also loaded on next boot of they system. More +	 * identity resolving keys, means more time during scanning is +	 * needed to actually resolve these addresses. +	 */ +	if (bacmp(&irk->rpa, BDADDR_ANY)) +		ev.store_hint = 0x01; +	else +		ev.store_hint = 0x00; + +	bacpy(&ev.rpa, &irk->rpa); +	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); +	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type); +	memcpy(ev.irk.val, irk->val, sizeof(irk->val)); + +	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);  } -int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, -			  u8 addr_type, u32 flags, u8 *name, u8 name_len, -			  u8 *dev_class) +void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, +		   bool persistent) +{ +	struct mgmt_ev_new_csrk ev; + +	memset(&ev, 0, sizeof(ev)); + +	/* Devices using resolvable or non-resolvable random addresses +	 * without providing an indentity resolving key don't require +	 * to store signature resolving keys. Their addresses will change +	 * the next time around. +	 * +	 * Only when a remote device provides an identity address +	 * make sure the signature resolving key is stored. So allow +	 * static random and public addresses here. 
+	 */ +	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM && +	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0) +		ev.store_hint = 0x00; +	else +		ev.store_hint = persistent; + +	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); +	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); +	ev.key.master = csrk->master; +	memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); + +	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); +} + +static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, +				  u8 data_len) +{ +	eir[eir_len++] = sizeof(type) + data_len; +	eir[eir_len++] = type; +	memcpy(&eir[eir_len], data, data_len); +	eir_len += data_len; + +	return eir_len; +} + +void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, +			   u8 addr_type, u32 flags, u8 *name, u8 name_len, +			   u8 *dev_class)  {  	char buf[512];  	struct mgmt_ev_device_connected *ev = (void *) buf; @@ -3680,8 +5381,8 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  	ev->eir_len = cpu_to_le16(eir_len); -	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, -			  sizeof(*ev) + eir_len, NULL); +	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, +		    sizeof(*ev) + eir_len, NULL);  }  static void disconnect_rsp(struct pending_cmd *cmd, void *data) @@ -3719,12 +5420,32 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)  	mgmt_pending_remove(cmd);  } -int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, -			     u8 link_type, u8 addr_type, u8 reason) +void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, +			      u8 link_type, u8 addr_type, u8 reason, +			      bool mgmt_connected)  {  	struct mgmt_ev_device_disconnected ev; +	struct pending_cmd *power_off;  	struct sock *sk = NULL; -	int err; + +	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); +	if (power_off) { +		struct mgmt_mode *cp = power_off->param; + +		/* The connection is still in hci_conn_hash so test for 1 +		 * instead of 0 to know if this is the last one. 
+		 */ +		if (!cp->val && hci_conn_count(hdev) == 1) { +			cancel_delayed_work(&hdev->power_off); +			queue_work(hdev->req_workqueue, &hdev->power_off.work); +		} +	} + +	if (!mgmt_connected) +		return; + +	if (link_type != ACL_LINK && link_type != LE_LINK) +		return;  	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); @@ -3732,56 +5453,74 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,  	ev.addr.type = link_to_bdaddr(link_type, addr_type);  	ev.reason = reason; -	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), -			 sk); +	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);  	if (sk)  		sock_put(sk);  	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,  			     hdev); - -	return err;  } -int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, -			   u8 link_type, u8 addr_type, u8 status) +void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 link_type, u8 addr_type, u8 status)  { +	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); +	struct mgmt_cp_disconnect *cp;  	struct mgmt_rp_disconnect rp;  	struct pending_cmd *cmd; -	int err;  	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,  			     hdev);  	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);  	if (!cmd) -		return -ENOENT; +		return; + +	cp = cmd->param; + +	if (bacmp(bdaddr, &cp->addr.bdaddr)) +		return; + +	if (cp->addr.type != bdaddr_type) +		return;  	bacpy(&rp.addr.bdaddr, bdaddr); -	rp.addr.type = link_to_bdaddr(link_type, addr_type); +	rp.addr.type = bdaddr_type; -	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, -			   mgmt_status(status), &rp, sizeof(rp)); +	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, +		     mgmt_status(status), &rp, sizeof(rp));  	mgmt_pending_remove(cmd); - -	return err;  } -int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, -			u8 addr_type, u8 status) +void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, +			 u8 addr_type, u8 status)  {  	struct mgmt_ev_connect_failed ev; +	struct pending_cmd *power_off; + +	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); +	if (power_off) { +		struct mgmt_mode *cp = power_off->param; + +		/* The connection is still in hci_conn_hash so test for 1 +		 * instead of 0 to know if this is the last one. 
+		 */ +		if (!cp->val && hci_conn_count(hdev) == 1) { +			cancel_delayed_work(&hdev->power_off); +			queue_work(hdev->req_workqueue, &hdev->power_off.work); +		} +	}  	bacpy(&ev.addr.bdaddr, bdaddr);  	ev.addr.type = link_to_bdaddr(link_type, addr_type);  	ev.status = mgmt_status(status); -	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); +	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);  } -int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) +void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)  {  	struct mgmt_ev_pin_code_request ev; @@ -3789,56 +5528,49 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)  	ev.addr.type = BDADDR_BREDR;  	ev.secure = secure; -	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), -			  NULL); +	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);  } -int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, -				 u8 status) +void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, +				  u8 status)  {  	struct pending_cmd *cmd;  	struct mgmt_rp_pin_code_reply rp; -	int err;  	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);  	if (!cmd) -		return -ENOENT; +		return;  	bacpy(&rp.addr.bdaddr, bdaddr);  	rp.addr.type = BDADDR_BREDR; -	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, -			   mgmt_status(status), &rp, sizeof(rp)); +	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, +		     mgmt_status(status), &rp, sizeof(rp));  	mgmt_pending_remove(cmd); - -	return err;  } -int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, -				     u8 status) +void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, +				      u8 status)  {  	struct pending_cmd *cmd;  	struct mgmt_rp_pin_code_reply rp; -	int err;  	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);  	if (!cmd) -		return -ENOENT; +		return;  	bacpy(&rp.addr.bdaddr, bdaddr);  	rp.addr.type = BDADDR_BREDR; -	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, -			   mgmt_status(status), &rp, sizeof(rp)); +	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, +		     mgmt_status(status), &rp, sizeof(rp));  	mgmt_pending_remove(cmd); - -	return err;  }  int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, -			      u8 link_type, u8 addr_type, __le32 value, +			      u8 link_type, u8 addr_type, u32 value,  			      u8 confirm_hint)  {  	struct mgmt_ev_user_confirm_request ev; @@ -3848,7 +5580,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,  	bacpy(&ev.addr.bdaddr, bdaddr);  	ev.addr.type = link_to_bdaddr(link_type, addr_type);  	ev.confirm_hint = confirm_hint; -	ev.value = value; +	ev.value = cpu_to_le32(value);  	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),  			  NULL); @@ -3936,8 +5668,8 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,  	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);  } -int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, -		     u8 addr_type, u8 status) +void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, +		      u8 addr_type, u8 status)  {  	struct mgmt_ev_auth_failed ev; @@ -3945,40 +5677,36 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  	ev.addr.type = link_to_bdaddr(link_type, addr_type);  	ev.status = mgmt_status(status); 
-	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); +	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);  } -int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) +void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)  {  	struct cmd_lookup match = { NULL, hdev }; -	bool changed = false; -	int err = 0; +	bool changed;  	if (status) {  		u8 mgmt_err = mgmt_status(status);  		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,  				     cmd_status_rsp, &mgmt_err); -		return 0; +		return;  	} -	if (test_bit(HCI_AUTH, &hdev->flags)) { -		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) -			changed = true; -	} else { -		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) -			changed = true; -	} +	if (test_bit(HCI_AUTH, &hdev->flags)) +		changed = !test_and_set_bit(HCI_LINK_SECURITY, +					    &hdev->dev_flags); +	else +		changed = test_and_clear_bit(HCI_LINK_SECURITY, +					     &hdev->dev_flags);  	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,  			     &match);  	if (changed) -		err = new_settings(hdev, match.sk); +		new_settings(hdev, match.sk);  	if (match.sk)  		sock_put(match.sk); - -	return err;  }  static void clear_eir(struct hci_request *req) @@ -3996,38 +5724,41 @@ static void clear_eir(struct hci_request *req)  	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);  } -int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) +void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)  {  	struct cmd_lookup match = { NULL, hdev };  	struct hci_request req;  	bool changed = false; -	int err = 0;  	if (status) {  		u8 mgmt_err = mgmt_status(status);  		if (enable && test_and_clear_bit(HCI_SSP_ENABLED, -						 &hdev->dev_flags)) -			err = new_settings(hdev, NULL); +						 &hdev->dev_flags)) { +			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); +			new_settings(hdev, NULL); +		}  		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,  				     &mgmt_err); - -		return err; +		return;  	}  	if (enable) { -		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) -			changed = true; +		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);  	} else { -		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) -			changed = true; +		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); +		if (!changed) +			changed = test_and_clear_bit(HCI_HS_ENABLED, +						     &hdev->dev_flags); +		else +			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);  	}  	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);  	if (changed) -		err = new_settings(hdev, match.sk); +		new_settings(hdev, match.sk);  	if (match.sk)  		sock_put(match.sk); @@ -4040,8 +5771,43 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)  		clear_eir(&req);  	hci_req_run(&req, NULL); +} -	return err; +void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) +{ +	struct cmd_lookup match = { NULL, hdev }; +	bool changed = false; + +	if (status) { +		u8 mgmt_err = mgmt_status(status); + +		if (enable) { +			if (test_and_clear_bit(HCI_SC_ENABLED, +					       &hdev->dev_flags)) +				new_settings(hdev, NULL); +			clear_bit(HCI_SC_ONLY, &hdev->dev_flags); +		} + +		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev, +				     cmd_status_rsp, &mgmt_err); +		return; +	} + +	if (enable) { +		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags); +	} else { +		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); +		
clear_bit(HCI_SC_ONLY, &hdev->dev_flags); +	} + +	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev, +			     settings_rsp, &match); + +	if (changed) +		new_settings(hdev, match.sk); + +	if (match.sk) +		sock_put(match.sk);  }  static void sk_lookup(struct pending_cmd *cmd, void *data) @@ -4054,33 +5820,30 @@ static void sk_lookup(struct pending_cmd *cmd, void *data)  	}  } -int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, -				   u8 status) +void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, +				    u8 status)  {  	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; -	int err = 0;  	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);  	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);  	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);  	if (!status) -		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, -				 3, NULL); +		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3, +			   NULL);  	if (match.sk)  		sock_put(match.sk); - -	return err;  } -int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) +void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)  {  	struct mgmt_cp_set_local_name ev;  	struct pending_cmd *cmd;  	if (status) -		return 0; +		return;  	memset(&ev, 0, sizeof(ev));  	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); @@ -4094,106 +5857,95 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)  		 * HCI dev don't send any mgmt signals.  		 */  		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) -			return 0; +			return;  	} -	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), -			  cmd ? cmd->sk : NULL); +	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), +		   cmd ? 
cmd->sk : NULL);  } -int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, -					    u8 *randomizer, u8 status) +void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, +				       u8 *randomizer192, u8 *hash256, +				       u8 *randomizer256, u8 status)  {  	struct pending_cmd *cmd; -	int err;  	BT_DBG("%s status %u", hdev->name, status);  	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);  	if (!cmd) -		return -ENOENT; +		return;  	if (status) { -		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, -				 mgmt_status(status)); +		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, +			   mgmt_status(status));  	} else { -		struct mgmt_rp_read_local_oob_data rp; - -		memcpy(rp.hash, hash, sizeof(rp.hash)); -		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); - -		err = cmd_complete(cmd->sk, hdev->id, -				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp, -				   sizeof(rp)); -	} - -	mgmt_pending_remove(cmd); - -	return err; -} +		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && +		    hash256 && randomizer256) { +			struct mgmt_rp_read_local_oob_ext_data rp; -int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) -{ -	struct cmd_lookup match = { NULL, hdev }; -	bool changed = false; -	int err = 0; +			memcpy(rp.hash192, hash192, sizeof(rp.hash192)); +			memcpy(rp.randomizer192, randomizer192, +			       sizeof(rp.randomizer192)); -	if (status) { -		u8 mgmt_err = mgmt_status(status); +			memcpy(rp.hash256, hash256, sizeof(rp.hash256)); +			memcpy(rp.randomizer256, randomizer256, +			       sizeof(rp.randomizer256)); -		if (enable && test_and_clear_bit(HCI_LE_ENABLED, -						 &hdev->dev_flags)) -			err = new_settings(hdev, NULL); - -		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, -				     &mgmt_err); +			cmd_complete(cmd->sk, hdev->id, +				     MGMT_OP_READ_LOCAL_OOB_DATA, 0, +				     &rp, sizeof(rp)); +		} else { +			struct mgmt_rp_read_local_oob_data rp; -		return err; -	} +			memcpy(rp.hash, hash192, sizeof(rp.hash)); +			memcpy(rp.randomizer, randomizer192, +			       sizeof(rp.randomizer)); -	if (enable) { -		if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags)) -			changed = true; -	} else { -		if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags)) -			changed = true; +			cmd_complete(cmd->sk, hdev->id, +				     MGMT_OP_READ_LOCAL_OOB_DATA, 0, +				     &rp, sizeof(rp)); +		}  	} -	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); - -	if (changed) -		err = new_settings(hdev, match.sk); - -	if (match.sk) -		sock_put(match.sk); - -	return err; +	mgmt_pending_remove(cmd);  } -int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, -		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8 -		      ssp, u8 *eir, u16 eir_len) +void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, +		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, +		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp, +		       u8 scan_rsp_len)  {  	char buf[512];  	struct mgmt_ev_device_found *ev = (void *) buf; +	struct smp_irk *irk;  	size_t ev_size;  	if (!hci_discovery_active(hdev)) -		return -EPERM; +		return; -	/* Leave 5 bytes for a potential CoD field */ -	if (sizeof(*ev) + eir_len + 5 > sizeof(buf)) -		return -EINVAL; +	/* Make sure that the buffer is big enough. The 5 extra bytes +	 * are for the potential CoD field. 
+	 */ +	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf)) +		return;  	memset(buf, 0, sizeof(buf)); -	bacpy(&ev->addr.bdaddr, bdaddr); -	ev->addr.type = link_to_bdaddr(link_type, addr_type); +	irk = hci_get_irk(hdev, bdaddr, addr_type); +	if (irk) { +		bacpy(&ev->addr.bdaddr, &irk->bdaddr); +		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type); +	} else { +		bacpy(&ev->addr.bdaddr, bdaddr); +		ev->addr.type = link_to_bdaddr(link_type, addr_type); +	} +  	ev->rssi = rssi;  	if (cfm_name) -		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME); +		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);  	if (!ssp) -		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING); +		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);  	if (eir_len > 0)  		memcpy(ev->eir, eir, eir_len); @@ -4202,14 +5954,17 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,  					  dev_class, 3); -	ev->eir_len = cpu_to_le16(eir_len); -	ev_size = sizeof(*ev) + eir_len; +	if (scan_rsp_len > 0) +		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); + +	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); +	ev_size = sizeof(*ev) + eir_len + scan_rsp_len; -	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); +	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);  } -int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, -		     u8 addr_type, s8 rssi, u8 *name, u8 name_len) +void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, +		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)  {  	struct mgmt_ev_device_found *ev;  	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2]; @@ -4228,11 +5983,10 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  	ev->eir_len = cpu_to_le16(eir_len); -	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, -			  sizeof(*ev) + eir_len, NULL); +	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);  } -int mgmt_discovering(struct hci_dev *hdev, u8 discovering) +void mgmt_discovering(struct hci_dev *hdev, u8 discovering)  {  	struct mgmt_ev_discovering ev;  	struct pending_cmd *cmd; @@ -4256,7 +6010,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering)  	ev.type = hdev->discovery.type;  	ev.discovering = discovering; -	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); +	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);  }  int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) @@ -4287,5 +6041,35 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  			  cmd ? cmd->sk : NULL);  } -module_param(enable_hs, bool, 0644); -MODULE_PARM_DESC(enable_hs, "Enable High Speed support"); +static void adv_enable_complete(struct hci_dev *hdev, u8 status) +{ +	BT_DBG("%s status %u", hdev->name, status); + +	/* Clear the advertising mgmt setting if we failed to re-enable it */ +	if (status) { +		clear_bit(HCI_ADVERTISING, &hdev->dev_flags); +		new_settings(hdev, NULL); +	} +} + +void mgmt_reenable_advertising(struct hci_dev *hdev) +{ +	struct hci_request req; + +	if (hci_conn_num(hdev, LE_LINK) > 0) +		return; + +	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) +		return; + +	hci_req_init(&req, hdev); +	enable_advertising(&req); + +	/* If this fails we have no option but to let user space know +	 * that we've disabled advertising. 
+	 */ +	if (hci_req_run(&req, adv_enable_complete) < 0) { +		clear_bit(HCI_ADVERTISING, &hdev->dev_flags); +		new_settings(hdev, NULL); +	} +} diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index ca957d34b0c..754b6fe4f74 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -186,9 +186,9 @@ static void rfcomm_l2state_change(struct sock *sk)  	rfcomm_schedule();  } -static void rfcomm_l2data_ready(struct sock *sk, int bytes) +static void rfcomm_l2data_ready(struct sock *sk)  { -	BT_DBG("%p bytes %d", sk, bytes); +	BT_DBG("%p", sk);  	rfcomm_schedule();  } @@ -216,6 +216,7 @@ static int rfcomm_check_security(struct rfcomm_dlc *d)  	switch (d->sec_level) {  	case BT_SECURITY_HIGH: +	case BT_SECURITY_FIPS:  		auth_type = HCI_AT_GENERAL_BONDING_MITM;  		break;  	case BT_SECURITY_MEDIUM: @@ -306,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)  	setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);  	skb_queue_head_init(&d->tx_queue); -	spin_lock_init(&d->lock); +	mutex_init(&d->lock);  	atomic_set(&d->refcnt, 1);  	rfcomm_dlc_clear_state(d); @@ -359,6 +360,11 @@ static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)  	return NULL;  } +static int rfcomm_check_channel(u8 channel) +{ +	return channel < 1 || channel > 30; +} +  static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel)  {  	struct rfcomm_session *s; @@ -368,7 +374,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,  	BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",  	       d, d->state, src, dst, channel); -	if (channel < 1 || channel > 30) +	if (rfcomm_check_channel(channel))  		return -EINVAL;  	if (d->state != BT_OPEN && d->state != BT_CLOSED) @@ -425,6 +431,20 @@ int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 chann  	return r;  } +static void __rfcomm_dlc_disconn(struct rfcomm_dlc *d) +{ +	struct rfcomm_session *s = d->session; + +	d->state = BT_DISCONN; +	if (skb_queue_empty(&d->tx_queue)) { +		rfcomm_send_disc(s, d->dlci); +		rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT); +	} else { +		rfcomm_queue_disc(d); +		rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2); +	} +} +  static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)  {  	struct rfcomm_session *s = d->session; @@ -437,32 +457,29 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)  	switch (d->state) {  	case BT_CONNECT:  	case BT_CONFIG: +	case BT_OPEN: +	case BT_CONNECT2:  		if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {  			set_bit(RFCOMM_AUTH_REJECT, &d->flags);  			rfcomm_schedule(); -			break; +			return 0;  		} -		/* Fall through */ +	} +	switch (d->state) { +	case BT_CONNECT:  	case BT_CONNECTED: -		d->state = BT_DISCONN; -		if (skb_queue_empty(&d->tx_queue)) { -			rfcomm_send_disc(s, d->dlci); -			rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT); -		} else { -			rfcomm_queue_disc(d); -			rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2); -		} +		__rfcomm_dlc_disconn(d);  		break; -	case BT_OPEN: -	case BT_CONNECT2: -		if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { -			set_bit(RFCOMM_AUTH_REJECT, &d->flags); -			rfcomm_schedule(); +	case BT_CONFIG: +		if (s->state != BT_BOUND) { +			__rfcomm_dlc_disconn(d);  			break;  		} -		/* Fall through */ +		/* if closing a dlc in a session that hasn't been started, +		 * just close and unlink the dlc +		 */  	default:  		rfcomm_dlc_clear_timer(d); @@ -513,6 +530,25 @@ no_session:  	return r;  } +struct 
rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel) +{ +	struct rfcomm_session *s; +	struct rfcomm_dlc *dlc = NULL; +	u8 dlci; + +	if (rfcomm_check_channel(channel)) +		return ERR_PTR(-EINVAL); + +	rfcomm_lock(); +	s = rfcomm_session_get(src, dst); +	if (s) { +		dlci = __dlci(!s->initiator, channel); +		dlc = rfcomm_dlc_get(s, dlci); +	} +	rfcomm_unlock(); +	return dlc; +} +  int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)  {  	int len = skb->len; @@ -533,6 +569,20 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)  	return len;  } +void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb) +{ +	int len = skb->len; + +	BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); + +	rfcomm_make_uih(skb, d->addr); +	skb_queue_tail(&d->tx_queue, skb); + +	if (d->state == BT_CONNECTED && +	    !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) +		rfcomm_schedule(); +} +  void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)  {  	BT_DBG("dlc %p state %ld", d, d->state); @@ -641,13 +691,13 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)  {  	struct rfcomm_session *s;  	struct list_head *p, *n; -	struct bt_sock *sk; +	struct l2cap_chan *chan;  	list_for_each_safe(p, n, &session_list) {  		s = list_entry(p, struct rfcomm_session, list); -		sk = bt_sk(s->sock->sk); +		chan = l2cap_pi(s->sock->sk)->chan; -		if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) && -				!bacmp(&sk->dst, dst)) +		if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) && +		    !bacmp(&chan->dst, dst))  			return s;  	}  	return NULL; @@ -694,6 +744,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,  	addr.l2_family = AF_BLUETOOTH;  	addr.l2_psm    = 0;  	addr.l2_cid    = 0; +	addr.l2_bdaddr_type = BDADDR_BREDR;  	*err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));  	if (*err < 0)  		goto failed; @@ -717,8 +768,9 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,  	bacpy(&addr.l2_bdaddr, dst);  	addr.l2_family = AF_BLUETOOTH; -	addr.l2_psm    = __constant_cpu_to_le16(RFCOMM_PSM); +	addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);  	addr.l2_cid    = 0; +	addr.l2_bdaddr_type = BDADDR_BREDR;  	*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);  	if (*err == 0 || *err == -EINPROGRESS)  		return s; @@ -732,11 +784,11 @@ failed:  void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst)  { -	struct sock *sk = s->sock->sk; +	struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;  	if (src) -		bacpy(src, &bt_sk(sk)->src); +		bacpy(src, &chan->src);  	if (dst) -		bacpy(dst, &bt_sk(sk)->dst); +		bacpy(dst, &chan->dst);  }  /* ---- RFCOMM frame sending ---- */ @@ -1941,12 +1993,11 @@ static void rfcomm_process_sessions(void)  			continue;  		} -		if (s->state == BT_LISTEN) { +		switch (s->state) { +		case BT_LISTEN:  			rfcomm_accept_connection(s);  			continue; -		} -		switch (s->state) {  		case BT_BOUND:  			s = rfcomm_check_connection(s);  			break; @@ -1981,8 +2032,9 @@ static int rfcomm_add_listener(bdaddr_t *ba)  	/* Bind socket */  	bacpy(&addr.l2_bdaddr, ba);  	addr.l2_family = AF_BLUETOOTH; -	addr.l2_psm    = __constant_cpu_to_le16(RFCOMM_PSM); +	addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);  	addr.l2_cid    = 0; +	addr.l2_bdaddr_type = BDADDR_BREDR;  	err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));  	if (err < 0) {  		BT_ERR("Bind failed %d", err); @@ -2082,7 +2134,8 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 
status, u8 encrypt)  				set_bit(RFCOMM_SEC_PENDING, &d->flags);  				rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);  				continue; -			} else if (d->sec_level == BT_SECURITY_HIGH) { +			} else if (d->sec_level == BT_SECURITY_HIGH || +				   d->sec_level == BT_SECURITY_FIPS) {  				set_bit(RFCOMM_ENC_DROP, &d->flags);  				continue;  			} @@ -2112,12 +2165,11 @@ static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)  	rfcomm_lock();  	list_for_each_entry(s, &session_list, list) { +		struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;  		struct rfcomm_dlc *d;  		list_for_each_entry(d, &s->dlcs, list) { -			struct sock *sk = s->sock->sk; -  			seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n", -				   &bt_sk(sk)->src, &bt_sk(sk)->dst, +				   &chan->src, &chan->dst,  				   d->state, d->dlci, d->mtu,  				   d->rx_credits, d->tx_credits);  		} @@ -2155,13 +2207,6 @@ static int __init rfcomm_init(void)  		goto unregister;  	} -	if (bt_debugfs) { -		rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, -				bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops); -		if (!rfcomm_dlc_debugfs) -			BT_ERR("Failed to create RFCOMM debug file"); -	} -  	err = rfcomm_init_ttys();  	if (err < 0)  		goto stop; @@ -2172,6 +2217,13 @@ static int __init rfcomm_init(void)  	BT_INFO("RFCOMM ver %s", VERSION); +	if (IS_ERR_OR_NULL(bt_debugfs)) +		return 0; + +	rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, +						 bt_debugfs, NULL, +						 &rfcomm_dlc_debugfs_fops); +  	return 0;  cleanup: diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 30b3721dc6d..c603a5eb472 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -54,7 +54,7 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)  	atomic_add(skb->len, &sk->sk_rmem_alloc);  	skb_queue_tail(&sk->sk_receive_queue, skb); -	sk->sk_data_ready(sk, skb->len); +	sk->sk_data_ready(sk);  	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)  		rfcomm_dlc_throttle(d); @@ -84,10 +84,11 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)  			sock_set_flag(sk, SOCK_ZAPPED);  			bt_accept_unlink(sk);  		} -		parent->sk_data_ready(parent, 0); +		parent->sk_data_ready(parent);  	} else {  		if (d->state == BT_CONNECTED) -			rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL); +			rfcomm_session_getaddr(d->session, +					       &rfcomm_pi(sk)->src, NULL);  		sk->sk_state_change(sk);  	} @@ -104,13 +105,18 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)  }  /* ---- Socket functions ---- */ -static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) +static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src)  {  	struct sock *sk = NULL;  	sk_for_each(sk, &rfcomm_sk_list.head) { -		if (rfcomm_pi(sk)->channel == channel && -				!bacmp(&bt_sk(sk)->src, src)) +		if (rfcomm_pi(sk)->channel != channel) +			continue; + +		if (bacmp(&rfcomm_pi(sk)->src, src)) +			continue; + +		if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN)  			break;  	} @@ -132,11 +138,11 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *  		if (rfcomm_pi(sk)->channel == channel) {  			/* Exact match. 
*/ -			if (!bacmp(&bt_sk(sk)->src, src)) +			if (!bacmp(&rfcomm_pi(sk)->src, src))  				break;  			/* Closest match */ -			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) +			if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))  				sk1 = sk;  		}  	} @@ -330,6 +336,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr  {  	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;  	struct sock *sk = sock->sk; +	int chan = sa->rc_channel;  	int err = 0;  	BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr); @@ -351,12 +358,12 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr  	write_lock(&rfcomm_sk_list.lock); -	if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) { +	if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {  		err = -EADDRINUSE;  	} else {  		/* Save source address */ -		bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr); -		rfcomm_pi(sk)->channel = sa->rc_channel; +		bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr); +		rfcomm_pi(sk)->channel = chan;  		sk->sk_state = BT_BOUND;  	} @@ -393,13 +400,14 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a  	}  	sk->sk_state = BT_CONNECT; -	bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); +	bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);  	rfcomm_pi(sk)->channel = sa->rc_channel;  	d->sec_level = rfcomm_pi(sk)->sec_level;  	d->role_switch = rfcomm_pi(sk)->role_switch; -	err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); +	err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, +			      sa->rc_channel);  	if (!err)  		err = bt_sock_wait_state(sk, BT_CONNECTED,  				sock_sndtimeo(sk, flags & O_NONBLOCK)); @@ -429,7 +437,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)  	}  	if (!rfcomm_pi(sk)->channel) { -		bdaddr_t *src = &bt_sk(sk)->src; +		bdaddr_t *src = &rfcomm_pi(sk)->src;  		u8 channel;  		err = -EINVAL; @@ -437,7 +445,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)  		write_lock(&rfcomm_sk_list.lock);  		for (channel = 1; channel < 31; channel++) -			if (!__rfcomm_get_sock_by_addr(channel, src)) { +			if (!__rfcomm_get_listen_sock_by_addr(channel, src)) {  				rfcomm_pi(sk)->channel = channel;  				err = 0;  				break; @@ -526,13 +534,17 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *  	BT_DBG("sock %p, sk %p", sock, sk); +	if (peer && sk->sk_state != BT_CONNECTED && +	    sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) +		return -ENOTCONN; +  	memset(sa, 0, sizeof(*sa));  	sa->rc_family  = AF_BLUETOOTH;  	sa->rc_channel = rfcomm_pi(sk)->channel;  	if (peer) -		bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst); +		bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);  	else -		bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src); +		bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);  	*len = sizeof(struct sockaddr_rc);  	return 0; @@ -544,7 +556,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	struct sock *sk = sock->sk;  	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;  	struct sk_buff *skb; -	int sent = 0; +	int sent;  	if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))  		return -ENOTCONN; @@ -559,6 +571,10 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	lock_sock(sk); +	sent = bt_sock_wait_ready(sk, msg->msg_flags); +	if (sent) +		goto done; +  	while (len) {  		size_t size = min_t(size_t, len, d->mtu);  		int err; @@ -594,6 +610,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  		len  -= 
size;  	} +done:  	release_sock(sk);  	return sent; @@ -608,7 +625,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {  		rfcomm_dlc_accept(d); -		msg->msg_namelen = 0;  		return 0;  	} @@ -642,6 +658,11 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u  			break;  		} +		if (opt & RFCOMM_LM_FIPS) { +			err = -EINVAL; +			break; +		} +  		if (opt & RFCOMM_LM_AUTH)  			rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;  		if (opt & RFCOMM_LM_ENCRYPT) @@ -732,8 +753,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c  static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)  {  	struct sock *sk = sock->sk; +	struct sock *l2cap_sk; +	struct l2cap_conn *conn;  	struct rfcomm_conninfo cinfo; -	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;  	int len, err = 0;  	u32 opt; @@ -755,7 +777,11 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u  			break;  		case BT_SECURITY_HIGH:  			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | -							RFCOMM_LM_SECURE; +			      RFCOMM_LM_SECURE; +			break; +		case BT_SECURITY_FIPS: +			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | +			      RFCOMM_LM_SECURE | RFCOMM_LM_FIPS;  			break;  		default:  			opt = 0; @@ -767,6 +793,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u  		if (put_user(opt, (u32 __user *) optval))  			err = -EFAULT; +  		break;  	case RFCOMM_CONNINFO: @@ -776,6 +803,9 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u  			break;  		} +		l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; +		conn = l2cap_pi(l2cap_sk)->chan->conn; +  		memset(&cinfo, 0, sizeof(cinfo));  		cinfo.hci_handle = conn->hcon->handle;  		memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); @@ -946,8 +976,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *  	bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);  	rfcomm_sock_init(sk, parent); -	bacpy(&bt_sk(sk)->src, &src); -	bacpy(&bt_sk(sk)->dst, &dst); +	bacpy(&rfcomm_pi(sk)->src, &src); +	bacpy(&rfcomm_pi(sk)->dst, &dst);  	rfcomm_pi(sk)->channel = channel;  	sk->sk_state = BT_CONFIG; @@ -974,7 +1004,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)  	sk_for_each(sk, &rfcomm_sk_list.head) {  		seq_printf(f, "%pMR %pMR %d %d\n", -			   &bt_sk(sk)->src, &bt_sk(sk)->dst, +			   &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,  			   sk->sk_state, rfcomm_pi(sk)->channel);  	} @@ -1044,15 +1074,15 @@ int __init rfcomm_init_sockets(void)  		goto error;  	} -	if (bt_debugfs) { -		rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, -				bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); -		if (!rfcomm_sock_debugfs) -			BT_ERR("Failed to create RFCOMM debug file"); -	} -  	BT_INFO("RFCOMM socket layer initialized"); +	if (IS_ERR_OR_NULL(bt_debugfs)) +		return 0; + +	rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, +						  bt_debugfs, NULL, +						  &rfcomm_sock_debugfs_fops); +  	return 0;  error: diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 84fcf9fff3e..8e385a0ae60 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -40,6 +40,7 @@  #define RFCOMM_TTY_MAJOR 216		/* device node major id of the usb/bluetooth.c driver */  #define RFCOMM_TTY_MINOR 0 +static DEFINE_MUTEX(rfcomm_ioctl_mutex);  static struct tty_driver *rfcomm_tty_driver;  
struct rfcomm_dev { @@ -51,6 +52,8 @@ struct rfcomm_dev {  	unsigned long		flags;  	int			err; +	unsigned long		status;		/* don't export to userspace */ +  	bdaddr_t		src;  	bdaddr_t		dst;  	u8			channel; @@ -67,7 +70,7 @@ struct rfcomm_dev {  };  static LIST_HEAD(rfcomm_dev_list); -static DEFINE_SPINLOCK(rfcomm_dev_lock); +static DEFINE_MUTEX(rfcomm_dev_lock);  static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);  static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); @@ -82,10 +85,6 @@ static void rfcomm_dev_destruct(struct tty_port *port)  	BT_DBG("dev %p dlc %p", dev, dlc); -	spin_lock(&rfcomm_dev_lock); -	list_del(&dev->list); -	spin_unlock(&rfcomm_dev_lock); -  	rfcomm_dlc_lock(dlc);  	/* Detach DLC if it's owned by this dev */  	if (dlc->owner == dev) @@ -94,7 +93,12 @@ static void rfcomm_dev_destruct(struct tty_port *port)  	rfcomm_dlc_put(dlc); -	tty_unregister_device(rfcomm_tty_driver, dev->id); +	if (dev->tty_dev) +		tty_unregister_device(rfcomm_tty_driver, dev->id); + +	mutex_lock(&rfcomm_dev_lock); +	list_del(&dev->list); +	mutex_unlock(&rfcomm_dev_lock);  	kfree(dev); @@ -107,8 +111,12 @@ static void rfcomm_dev_destruct(struct tty_port *port)  static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)  {  	struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); +	int err; -	return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); +	err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); +	if (err) +		set_bit(TTY_IO_ERROR, &tty->flags); +	return err;  }  /* we block the open until the dlc->state becomes BT_CONNECTED */ @@ -138,7 +146,7 @@ static const struct tty_port_operations rfcomm_port_ops = {  	.carrier_raised = rfcomm_dev_carrier_raised,  }; -static struct rfcomm_dev *__rfcomm_dev_get(int id) +static struct rfcomm_dev *__rfcomm_dev_lookup(int id)  {  	struct rfcomm_dev *dev; @@ -153,36 +161,41 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)  {  	struct rfcomm_dev *dev; -	spin_lock(&rfcomm_dev_lock); +	mutex_lock(&rfcomm_dev_lock); -	dev = __rfcomm_dev_get(id); +	dev = __rfcomm_dev_lookup(id); -	if (dev) { -		if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) -			dev = NULL; -		else -			tty_port_get(&dev->port); -	} +	if (dev && !tty_port_get(&dev->port)) +		dev = NULL; -	spin_unlock(&rfcomm_dev_lock); +	mutex_unlock(&rfcomm_dev_lock);  	return dev;  } -static struct device *rfcomm_get_device(struct rfcomm_dev *dev) +static void rfcomm_reparent_device(struct rfcomm_dev *dev)  {  	struct hci_dev *hdev;  	struct hci_conn *conn;  	hdev = hci_get_route(&dev->dst, &dev->src);  	if (!hdev) -		return NULL; +		return; +	/* The lookup results are unsafe to access without the +	 * hci device lock (FIXME: why is this not documented?) +	 */ +	hci_dev_lock(hdev);  	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst); -	hci_dev_put(hdev); +	/* Just because the acl link is in the hash table is no +	 * guarantee the sysfs device has been added ... +	 */ +	if (conn && device_is_registered(&conn->dev)) +		device_move(dev->tty_dev, &conn->dev, DPM_ORDER_DEV_AFTER_PARENT); -	return conn ? 
&conn->dev : NULL; +	hci_dev_unlock(hdev); +	hci_dev_put(hdev);  }  static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) @@ -200,19 +213,18 @@ static ssize_t show_channel(struct device *tty_dev, struct device_attribute *att  static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);  static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); -static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) +static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req, +					   struct rfcomm_dlc *dlc)  {  	struct rfcomm_dev *dev, *entry;  	struct list_head *head = &rfcomm_dev_list;  	int err = 0; -	BT_DBG("id %d channel %d", req->dev_id, req->channel); -  	dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);  	if (!dev) -		return -ENOMEM; +		return ERR_PTR(-ENOMEM); -	spin_lock(&rfcomm_dev_lock); +	mutex_lock(&rfcomm_dev_lock);  	if (req->dev_id < 0) {  		dev->id = 0; @@ -293,22 +305,37 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)  	   holds reference to this module. */  	__module_get(THIS_MODULE); +	mutex_unlock(&rfcomm_dev_lock); +	return dev; +  out: -	spin_unlock(&rfcomm_dev_lock); +	mutex_unlock(&rfcomm_dev_lock); +	kfree(dev); +	return ERR_PTR(err); +} + +static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) +{ +	struct rfcomm_dev *dev; +	struct device *tty; + +	BT_DBG("id %d channel %d", req->dev_id, req->channel); -	if (err < 0) -		goto free; +	dev = __rfcomm_dev_add(req, dlc); +	if (IS_ERR(dev)) { +		rfcomm_dlc_put(dlc); +		return PTR_ERR(dev); +	} -	dev->tty_dev = tty_port_register_device(&dev->port, rfcomm_tty_driver, +	tty = tty_port_register_device(&dev->port, rfcomm_tty_driver,  			dev->id, NULL); -	if (IS_ERR(dev->tty_dev)) { -		err = PTR_ERR(dev->tty_dev); -		spin_lock(&rfcomm_dev_lock); -		list_del(&dev->list); -		spin_unlock(&rfcomm_dev_lock); -		goto free; +	if (IS_ERR(tty)) { +		tty_port_put(&dev->port); +		return PTR_ERR(tty);  	} +	dev->tty_dev = tty; +	rfcomm_reparent_device(dev);  	dev_set_drvdata(dev->tty_dev, dev);  	if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) @@ -318,24 +345,23 @@ out:  		BT_ERR("Failed to create channel attribute");  	return dev->id; - -free: -	kfree(dev); -	return err;  }  /* ---- Send buffer ---- */ -static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc) +static inline unsigned int rfcomm_room(struct rfcomm_dev *dev)  { -	/* We can't let it be zero, because we don't get a callback -	   when tx_credits becomes nonzero, hence we'd never wake up */ -	return dlc->mtu * (dlc->tx_credits?:1); +	struct rfcomm_dlc *dlc = dev->dlc; + +	/* Limit the outstanding number of packets not yet sent to 40 */ +	int pending = 40 - atomic_read(&dev->wmem_alloc); + +	return max(0, pending) * dlc->mtu;  }  static void rfcomm_wfree(struct sk_buff *skb)  {  	struct rfcomm_dev *dev = (void *) skb->sk; -	atomic_sub(skb->truesize, &dev->wmem_alloc); +	atomic_dec(&dev->wmem_alloc);  	if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))  		tty_port_tty_wakeup(&dev->port);  	tty_port_put(&dev->port); @@ -344,28 +370,24 @@ static void rfcomm_wfree(struct sk_buff *skb)  static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)  {  	tty_port_get(&dev->port); -	atomic_add(skb->truesize, &dev->wmem_alloc); +	atomic_inc(&dev->wmem_alloc);  	skb->sk = (void *) dev;  	skb->destructor = rfcomm_wfree;  }  static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority)  { -	if 
(atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { -		struct sk_buff *skb = alloc_skb(size, priority); -		if (skb) { -			rfcomm_set_owner_w(skb, dev); -			return skb; -		} -	} -	return NULL; +	struct sk_buff *skb = alloc_skb(size, priority); +	if (skb) +		rfcomm_set_owner_w(skb, dev); +	return skb;  }  /* ---- Device IOCTLs ---- */  #define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP)) -static int rfcomm_create_dev(struct sock *sk, void __user *arg) +static int __rfcomm_create_dev(struct sock *sk, void __user *arg)  {  	struct rfcomm_dev_req req;  	struct rfcomm_dlc *dlc; @@ -387,16 +409,22 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)  		dlc = rfcomm_pi(sk)->dlc;  		rfcomm_dlc_hold(dlc);  	} else { +		/* Validate the channel is unused */ +		dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel); +		if (IS_ERR(dlc)) +			return PTR_ERR(dlc); +		else if (dlc) { +			rfcomm_dlc_put(dlc); +			return -EBUSY; +		}  		dlc = rfcomm_dlc_alloc(GFP_KERNEL);  		if (!dlc)  			return -ENOMEM;  	}  	id = rfcomm_dev_add(&req, dlc); -	if (id < 0) { -		rfcomm_dlc_put(dlc); +	if (id < 0)  		return id; -	}  	if (req.flags & (1 << RFCOMM_REUSE_DLC)) {  		/* DLC is now used by device. @@ -407,7 +435,7 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)  	return id;  } -static int rfcomm_release_dev(void __user *arg) +static int __rfcomm_release_dev(void __user *arg)  {  	struct rfcomm_dev_req req;  	struct rfcomm_dev *dev; @@ -427,6 +455,12 @@ static int rfcomm_release_dev(void __user *arg)  		return -EPERM;  	} +	/* only release once */ +	if (test_and_set_bit(RFCOMM_DEV_RELEASED, &dev->status)) { +		tty_port_put(&dev->port); +		return -EALREADY; +	} +  	if (req.flags & (1 << RFCOMM_HANGUP_NOW))  		rfcomm_dlc_close(dev->dlc, 0); @@ -437,13 +471,35 @@ static int rfcomm_release_dev(void __user *arg)  		tty_kref_put(tty);  	} -	if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)) +	if (!test_bit(RFCOMM_TTY_OWNED, &dev->status))  		tty_port_put(&dev->port);  	tty_port_put(&dev->port);  	return 0;  } +static int rfcomm_create_dev(struct sock *sk, void __user *arg) +{ +	int ret; + +	mutex_lock(&rfcomm_ioctl_mutex); +	ret = __rfcomm_create_dev(sk, arg); +	mutex_unlock(&rfcomm_ioctl_mutex); + +	return ret; +} + +static int rfcomm_release_dev(void __user *arg) +{ +	int ret; + +	mutex_lock(&rfcomm_ioctl_mutex); +	ret = __rfcomm_release_dev(arg); +	mutex_unlock(&rfcomm_ioctl_mutex); + +	return ret; +} +  static int rfcomm_get_dev_list(void __user *arg)  {  	struct rfcomm_dev *dev; @@ -468,10 +524,10 @@ static int rfcomm_get_dev_list(void __user *arg)  	di = dl->dev_info; -	spin_lock(&rfcomm_dev_lock); +	mutex_lock(&rfcomm_dev_lock);  	list_for_each_entry(dev, &rfcomm_dev_list, list) { -		if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) +		if (!tty_port_get(&dev->port))  			continue;  		(di + n)->id      = dev->id;  		(di + n)->flags   = dev->flags; @@ -479,11 +535,12 @@ static int rfcomm_get_dev_list(void __user *arg)  		(di + n)->channel = dev->channel;  		bacpy(&(di + n)->src, &dev->src);  		bacpy(&(di + n)->dst, &dev->dst); +		tty_port_put(&dev->port);  		if (++n >= dev_num)  			break;  	} -	spin_unlock(&rfcomm_dev_lock); +	mutex_unlock(&rfcomm_dev_lock);  	dl->dev_num = n;  	size = sizeof(*dl) + n * sizeof(*di); @@ -576,8 +633,7 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)  	dev->err = err;  	if (dlc->state == BT_CONNECTED) { -		device_move(dev->tty_dev, rfcomm_get_device(dev), -			    DPM_ORDER_DEV_AFTER_PARENT); +		
rfcomm_reparent_device(dev);  		wake_up_interruptible(&dev->port.open_wait);  	} else if (dlc->state == BT_CLOSED) @@ -670,10 +726,22 @@ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)  	/* install the tty_port */  	err = tty_port_install(&dev->port, driver, tty); -	if (err) +	if (err) {  		rfcomm_tty_cleanup(tty); +		return err; +	} -	return err; +	/* take over the tty_port reference if the port was created with the +	 * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port +	 * when the last process closes the tty. The behaviour is expected by +	 * userspace. +	 */ +	if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { +		set_bit(RFCOMM_TTY_OWNED, &dev->status); +		tty_port_put(&dev->port); +	} + +	return 0;  }  static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) @@ -717,7 +785,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in  	struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;  	struct rfcomm_dlc *dlc = dev->dlc;  	struct sk_buff *skb; -	int err = 0, sent = 0, size; +	int sent = 0, size;  	BT_DBG("tty %p count %d", tty, count); @@ -725,7 +793,6 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in  		size = min_t(uint, count, dlc->mtu);  		skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC); -  		if (!skb)  			break; @@ -733,32 +800,24 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in  		memcpy(skb_put(skb, size), buf + sent, size); -		err = rfcomm_dlc_send(dlc, skb); -		if (err < 0) { -			kfree_skb(skb); -			break; -		} +		rfcomm_dlc_send_noerror(dlc, skb);  		sent  += size;  		count -= size;  	} -	return sent ? sent : err; +	return sent;  }  static int rfcomm_tty_write_room(struct tty_struct *tty)  {  	struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; -	int room; +	int room = 0; -	BT_DBG("tty %p", tty); +	if (dev && dev->dlc) +		room = rfcomm_room(dev); -	if (!dev || !dev->dlc) -		return 0; - -	room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc); -	if (room < 0) -		room = 0; +	BT_DBG("tty %p room %d", tty, room);  	return room;  } @@ -1010,10 +1069,6 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)  	BT_DBG("tty %p dev %p", tty, dev);  	tty_port_hangup(&dev->port); - -	if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) && -	    !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)) -		tty_port_put(&dev->port);  }  static int rfcomm_tty_tiocmget(struct tty_struct *tty) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 96bd388d93a..c06dbd3938e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -92,9 +92,6 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)  	hcon->sco_data = conn;  	conn->hcon = hcon; -	conn->src = &hdev->bdaddr; -	conn->dst = &hcon->dst; -  	if (hdev->sco_mtu > 0)  		conn->mtu = hdev->sco_mtu;  	else @@ -156,16 +153,14 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,  static int sco_connect(struct sock *sk)  { -	bdaddr_t *src = &bt_sk(sk)->src; -	bdaddr_t *dst = &bt_sk(sk)->dst;  	struct sco_conn *conn;  	struct hci_conn *hcon;  	struct hci_dev  *hdev;  	int err, type; -	BT_DBG("%pMR -> %pMR", src, dst); +	BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); -	hdev = hci_get_route(dst, src); +	hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);  	if (!hdev)  		return -EHOSTUNREACH; @@ -182,7 +177,8 @@ static int sco_connect(struct sock *sk)  		goto done;  	} -	hcon = hci_connect_sco(hdev, 
type, dst, sco_pi(sk)->setting); +	hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, +			       sco_pi(sk)->setting);  	if (IS_ERR(hcon)) {  		err = PTR_ERR(hcon);  		goto done; @@ -196,7 +192,7 @@ static int sco_connect(struct sock *sk)  	}  	/* Update source addr of the socket */ -	bacpy(src, conn->src); +	bacpy(&sco_pi(sk)->src, &hcon->src);  	err = sco_chan_add(conn, sk, NULL);  	if (err) @@ -270,7 +266,7 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)  		if (sk->sk_state != BT_LISTEN)  			continue; -		if (!bacmp(&bt_sk(sk)->src, ba)) +		if (!bacmp(&sco_pi(sk)->src, ba))  			return sk;  	} @@ -291,11 +287,11 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)  			continue;  		/* Exact match. */ -		if (!bacmp(&bt_sk(sk)->src, src)) +		if (!bacmp(&sco_pi(sk)->src, src))  			break;  		/* Closest match */ -		if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) +		if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))  			sk1 = sk;  	} @@ -475,7 +471,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le  		goto done;  	} -	bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); +	bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);  	sk->sk_state = BT_BOUND; @@ -505,7 +501,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen  	lock_sock(sk);  	/* Set destination address and psm */ -	bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); +	bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);  	err = sco_connect(sk);  	if (err) @@ -522,7 +518,7 @@ done:  static int sco_sock_listen(struct socket *sock, int backlog)  {  	struct sock *sk = sock->sk; -	bdaddr_t *src = &bt_sk(sk)->src; +	bdaddr_t *src = &sco_pi(sk)->src;  	int err = 0;  	BT_DBG("sk %p backlog %d", sk, backlog); @@ -626,9 +622,9 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len  	*len = sizeof(struct sockaddr_sco);  	if (peer) -		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst); +		bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);  	else -		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src); +		bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);  	return 0;  } @@ -680,20 +676,20 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)  		bacpy(&cp.bdaddr, &conn->dst);  		cp.pkt_type = cpu_to_le16(conn->pkt_type); -		cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40); -		cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40); +		cp.tx_bandwidth   = cpu_to_le32(0x00001f40); +		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);  		cp.content_format = cpu_to_le16(setting);  		switch (setting & SCO_AIRMODE_MASK) {  		case SCO_AIRMODE_TRANSP:  			if (conn->pkt_type & ESCO_2EV3) -				cp.max_latency = __constant_cpu_to_le16(0x0008); +				cp.max_latency = cpu_to_le16(0x0008);  			else -				cp.max_latency = __constant_cpu_to_le16(0x000D); +				cp.max_latency = cpu_to_le16(0x000D);  			cp.retrans_effort = 0x02;  			break;  		case SCO_AIRMODE_CVSD: -			cp.max_latency = __constant_cpu_to_le16(0xffff); +			cp.max_latency = cpu_to_le16(0xffff);  			cp.retrans_effort = 0xff;  			break;  		} @@ -715,7 +711,6 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {  		sco_conn_defer_accept(pi->conn->hcon, pi->setting);  		sk->sk_state = BT_CONFIG; -		msg->msg_namelen = 0;  		release_sock(sk);  		return 0; @@ -999,7 +994,7 @@ static void sco_conn_ready(struct sco_conn *conn)  	} else {  		sco_conn_lock(conn); -		parent = sco_get_sock_listen(conn->src); +		parent = sco_get_sock_listen(&conn->hcon->src);  		if (!parent) {  			sco_conn_unlock(conn);  			
return; @@ -1017,8 +1012,8 @@ static void sco_conn_ready(struct sco_conn *conn)  		sco_sock_init(sk, parent); -		bacpy(&bt_sk(sk)->src, conn->src); -		bacpy(&bt_sk(sk)->dst, conn->dst); +		bacpy(&sco_pi(sk)->src, &conn->hcon->src); +		bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);  		hci_conn_hold(conn->hcon);  		__sco_chan_add(conn, sk, parent); @@ -1029,7 +1024,7 @@ static void sco_conn_ready(struct sco_conn *conn)  			sk->sk_state = BT_CONNECTED;  		/* Wake up parent */ -		parent->sk_data_ready(parent, 1); +		parent->sk_data_ready(parent);  		bh_unlock_sock(parent); @@ -1051,8 +1046,8 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)  		if (sk->sk_state != BT_LISTEN)  			continue; -		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || -		    !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { +		if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || +		    !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {  			lm |= HCI_LM_ACCEPT;  			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) @@ -1111,8 +1106,8 @@ static int sco_debugfs_show(struct seq_file *f, void *p)  	read_lock(&sco_sk_list.lock);  	sk_for_each(sk, &sco_sk_list.head) { -		seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, -			   &bt_sk(sk)->dst, sk->sk_state); +		seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, +			   &sco_pi(sk)->dst, sk->sk_state);  	}  	read_unlock(&sco_sk_list.lock); @@ -1181,15 +1176,14 @@ int __init sco_init(void)  		goto error;  	} -	if (bt_debugfs) { -		sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, -						  NULL, &sco_debugfs_fops); -		if (!sco_debugfs) -			BT_ERR("Failed to create SCO debug file"); -	} -  	BT_INFO("SCO socket layer initialized"); +	if (IS_ERR_OR_NULL(bt_debugfs)) +		return 0; + +	sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, +					  NULL, &sco_debugfs_fops); +  	return 0;  error: diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index b5562abdd6e..e33a982161c 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -28,20 +28,48 @@  #include <net/bluetooth/hci_core.h>  #include <net/bluetooth/l2cap.h>  #include <net/bluetooth/mgmt.h> -#include <net/bluetooth/smp.h> + +#include "smp.h"  #define SMP_TIMEOUT	msecs_to_jiffies(30000)  #define AUTH_REQ_MASK   0x07 -static inline void swap128(u8 src[16], u8 dst[16]) +#define SMP_FLAG_TK_VALID	1 +#define SMP_FLAG_CFM_PENDING	2 +#define SMP_FLAG_MITM_AUTH	3 +#define SMP_FLAG_COMPLETE	4 +#define SMP_FLAG_INITIATOR	5 + +struct smp_chan { +	struct l2cap_conn *conn; +	u8		preq[7]; /* SMP Pairing Request */ +	u8		prsp[7]; /* SMP Pairing Response */ +	u8		prnd[16]; /* SMP Pairing Random (local) */ +	u8		rrnd[16]; /* SMP Pairing Random (remote) */ +	u8		pcnf[16]; /* SMP Pairing Confirm */ +	u8		tk[16]; /* SMP Temporary Key */ +	u8		enc_key_size; +	u8		remote_key_dist; +	bdaddr_t	id_addr; +	u8		id_addr_type; +	u8		irk[16]; +	struct smp_csrk	*csrk; +	struct smp_csrk	*slave_csrk; +	struct smp_ltk	*ltk; +	struct smp_ltk	*slave_ltk; +	struct smp_irk	*remote_irk; +	unsigned long	flags; +}; + +static inline void swap128(const u8 src[16], u8 dst[16])  {  	int i;  	for (i = 0; i < 16; i++)  		dst[15 - i] = src[i];  } -static inline void swap56(u8 src[7], u8 dst[7]) +static inline void swap56(const u8 src[7], u8 dst[7])  {  	int i;  	for (i = 0; i < 7; i++) @@ -52,8 +80,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)  {  	struct blkcipher_desc desc;  	struct scatterlist sg; -	int err, iv_len; -	unsigned char iv[128]; +	uint8_t tmp[16], data[16]; +	int err;  	if (tfm == NULL) {  		BT_ERR("tfm %p", tfm); @@ -63,30 
+91,92 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)  	desc.tfm = tfm;  	desc.flags = 0; -	err = crypto_blkcipher_setkey(tfm, k, 16); +	/* The most significant octet of key corresponds to k[0] */ +	swap128(k, tmp); + +	err = crypto_blkcipher_setkey(tfm, tmp, 16);  	if (err) {  		BT_ERR("cipher setkey failed: %d", err);  		return err;  	} -	sg_init_one(&sg, r, 16); +	/* Most significant octet of plaintextData corresponds to data[0] */ +	swap128(r, data); -	iv_len = crypto_blkcipher_ivsize(tfm); -	if (iv_len) { -		memset(&iv, 0xff, iv_len); -		crypto_blkcipher_set_iv(tfm, iv, iv_len); -	} +	sg_init_one(&sg, data, 16);  	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);  	if (err)  		BT_ERR("Encrypt data error %d", err); +	/* Most significant octet of encryptedData corresponds to data[0] */ +	swap128(data, r); +  	return err;  } +static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3]) +{ +	u8 _res[16]; +	int err; + +	/* r' = padding || r */ +	memcpy(_res, r, 3); +	memset(_res + 3, 0, 13); + +	err = smp_e(tfm, irk, _res); +	if (err) { +		BT_ERR("Encrypt error"); +		return err; +	} + +	/* The output of the random address function ah is: +	 *	ah(h, r) = e(k, r') mod 2^24 +	 * The output of the security function e is then truncated to 24 bits +	 * by taking the least significant 24 bits of the output of e as the +	 * result of ah. +	 */ +	memcpy(res, _res, 3); + +	return 0; +} + +bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], +		     bdaddr_t *bdaddr) +{ +	u8 hash[3]; +	int err; + +	BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); + +	err = smp_ah(tfm, irk, &bdaddr->b[3], hash); +	if (err) +		return false; + +	return !memcmp(bdaddr->b, hash, 3); +} + +int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa) +{ +	int err; + +	get_random_bytes(&rpa->b[3], 3); + +	rpa->b[5] &= 0x3f;	/* Clear two most significant bits */ +	rpa->b[5] |= 0x40;	/* Set second most significant bit */ + +	err = smp_ah(tfm, irk, &rpa->b[3], rpa->b); +	if (err < 0) +		return err; + +	BT_DBG("RPA %pMR", rpa); + +	return 0; +} +  static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16], -		u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, -		u8 _rat, bdaddr_t *ra, u8 res[16]) +		  u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, +		  u8 _rat, bdaddr_t *ra, u8 res[16])  {  	u8 p1[16], p2[16];  	int err; @@ -94,16 +184,15 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],  	memset(p1, 0, 16);  	/* p1 = pres || preq || _rat || _iat */ -	swap56(pres, p1); -	swap56(preq, p1 + 7); -	p1[14] = _rat; -	p1[15] = _iat; - -	memset(p2, 0, 16); +	p1[0] = _iat; +	p1[1] = _rat; +	memcpy(p1 + 2, preq, 7); +	memcpy(p1 + 9, pres, 7);  	/* p2 = padding || ia || ra */ -	baswap((bdaddr_t *) (p2 + 4), ia); -	baswap((bdaddr_t *) (p2 + 10), ra); +	memcpy(p2, ra, 6); +	memcpy(p2 + 6, ia, 6); +	memset(p2 + 12, 0, 4);  	/* res = r XOR p1 */  	u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); @@ -126,14 +215,14 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],  	return err;  } -static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], -			u8 r1[16], u8 r2[16], u8 _r[16]) +static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16], +		  u8 r2[16], u8 _r[16])  {  	int err;  	/* Just least significant octets from r1 and r2 are considered */ -	memcpy(_r, r1 + 8, 8); -	memcpy(_r + 8, r2 + 8, 8); +	memcpy(_r, r2, 8); +	memcpy(_r + 8, r1, 8);  	err = smp_e(tfm, k, _r);  	if (err) @@ -142,15 +231,8 @@ static int smp_s1(struct 
crypto_blkcipher *tfm, u8 k[16],  	return err;  } -static int smp_rand(u8 *buf) -{ -	get_random_bytes(buf, 16); - -	return 0; -} -  static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code, -						u16 dlen, void *data) +				     u16 dlen, void *data)  {  	struct sk_buff *skb;  	struct l2cap_hdr *lh; @@ -167,7 +249,7 @@ static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,  	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);  	lh->len = cpu_to_le16(sizeof(code) + dlen); -	lh->cid = __constant_cpu_to_le16(L2CAP_CID_SMP); +	lh->cid = cpu_to_le16(L2CAP_CID_SMP);  	memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); @@ -213,35 +295,48 @@ static __u8 seclevel_to_authreq(__u8 sec_level)  }  static void build_pairing_cmd(struct l2cap_conn *conn, -				struct smp_cmd_pairing *req, -				struct smp_cmd_pairing *rsp, -				__u8 authreq) +			      struct smp_cmd_pairing *req, +			      struct smp_cmd_pairing *rsp, __u8 authreq)  { -	u8 dist_keys = 0; +	struct smp_chan *smp = conn->smp_chan; +	struct hci_conn *hcon = conn->hcon; +	struct hci_dev *hdev = hcon->hdev; +	u8 local_dist = 0, remote_dist = 0;  	if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) { -		dist_keys = SMP_DIST_ENC_KEY; +		local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; +		remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;  		authreq |= SMP_AUTH_BONDING;  	} else {  		authreq &= ~SMP_AUTH_BONDING;  	} +	if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) +		remote_dist |= SMP_DIST_ID_KEY; + +	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) +		local_dist |= SMP_DIST_ID_KEY; +  	if (rsp == NULL) {  		req->io_capability = conn->hcon->io_capability;  		req->oob_flag = SMP_OOB_NOT_PRESENT;  		req->max_key_size = SMP_MAX_ENC_KEY_SIZE; -		req->init_key_dist = 0; -		req->resp_key_dist = dist_keys; +		req->init_key_dist = local_dist; +		req->resp_key_dist = remote_dist;  		req->auth_req = (authreq & AUTH_REQ_MASK); + +		smp->remote_key_dist = remote_dist;  		return;  	}  	rsp->io_capability = conn->hcon->io_capability;  	rsp->oob_flag = SMP_OOB_NOT_PRESENT;  	rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; -	rsp->init_key_dist = 0; -	rsp->resp_key_dist = req->resp_key_dist & dist_keys; +	rsp->init_key_dist = req->init_key_dist & remote_dist; +	rsp->resp_key_dist = req->resp_key_dist & local_dist;  	rsp->auth_req = (authreq & AUTH_REQ_MASK); + +	smp->remote_key_dist = rsp->init_key_dist;  }  static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) @@ -249,7 +344,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)  	struct smp_chan *smp = conn->smp_chan;  	if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) || -			(max_key_size < SMP_MIN_ENC_KEY_SIZE)) +	    (max_key_size < SMP_MIN_ENC_KEY_SIZE))  		return SMP_ENC_KEY_SIZE;  	smp->enc_key_size = max_key_size; @@ -257,21 +352,21 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)  	return 0;  } -static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) +static void smp_failure(struct l2cap_conn *conn, u8 reason)  {  	struct hci_conn *hcon = conn->hcon; -	if (send) +	if (reason)  		smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), -								&reason); +			     &reason); -	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags); -	mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, -			 hcon->dst_type, HCI_ERROR_AUTH_FAILURE); +	clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags); +	mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, +			 HCI_ERROR_AUTH_FAILURE);  	
cancel_delayed_work_sync(&conn->security_timer); -	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) +	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))  		smp_chan_destroy(conn);  } @@ -290,6 +385,16 @@ static const u8 gen_method[5][5] = {  	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP     },  }; +static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) +{ +	/* If either side has unknown io_caps, use JUST WORKS */ +	if (local_io > SMP_IO_KEYBOARD_DISPLAY || +	    remote_io > SMP_IO_KEYBOARD_DISPLAY) +		return JUST_WORKS; + +	return gen_method[remote_io][local_io]; +} +  static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,  						u8 local_io, u8 remote_io)  { @@ -301,33 +406,34 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,  	/* Initialize key for JUST WORKS */  	memset(smp->tk, 0, sizeof(smp->tk)); -	clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); +	clear_bit(SMP_FLAG_TK_VALID, &smp->flags);  	BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);  	/* If neither side wants MITM, use JUST WORKS */ -	/* If either side has unknown io_caps, use JUST WORKS */  	/* Otherwise, look up method from the table */ -	if (!(auth & SMP_AUTH_MITM) || -			local_io > SMP_IO_KEYBOARD_DISPLAY || -			remote_io > SMP_IO_KEYBOARD_DISPLAY) +	if (!(auth & SMP_AUTH_MITM))  		method = JUST_WORKS;  	else -		method = gen_method[remote_io][local_io]; +		method = get_auth_method(smp, local_io, remote_io);  	/* If not bonding, don't ask user to confirm a Zero TK */  	if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)  		method = JUST_WORKS; +	/* Don't confirm locally initiated pairing attempts */ +	if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) +		method = JUST_WORKS; +  	/* If Just Works, Continue with Zero TK */  	if (method == JUST_WORKS) { -		set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); +		set_bit(SMP_FLAG_TK_VALID, &smp->flags);  		return 0;  	}  	/* Not Just Works/Confirm results in MITM Authentication */  	if (method != JUST_CFM) -		set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags); +		set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);  	/* If both devices have Keyoard-Display I/O, the master  	 * Confirms and the slave Enters the passkey. @@ -339,168 +445,145 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,  			method = REQ_PASSKEY;  	} -	/* Generate random passkey. Not valid until confirmed. */ +	/* Generate random passkey. 
*/  	if (method == CFM_PASSKEY) { -		u8 key[16]; - -		memset(key, 0, sizeof(key)); +		memset(smp->tk, 0, sizeof(smp->tk));  		get_random_bytes(&passkey, sizeof(passkey));  		passkey %= 1000000; -		put_unaligned_le32(passkey, key); -		swap128(key, smp->tk); +		put_unaligned_le32(passkey, smp->tk);  		BT_DBG("PassKey: %d", passkey); +		set_bit(SMP_FLAG_TK_VALID, &smp->flags);  	}  	hci_dev_lock(hcon->hdev);  	if (method == REQ_PASSKEY) -		ret = mgmt_user_passkey_request(hcon->hdev, conn->dst, +		ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,  						hcon->type, hcon->dst_type); +	else if (method == JUST_CFM) +		ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, +						hcon->type, hcon->dst_type, +						passkey, 1);  	else -		ret = mgmt_user_confirm_request(hcon->hdev, conn->dst, +		ret = mgmt_user_passkey_notify(hcon->hdev, &hcon->dst,  						hcon->type, hcon->dst_type, -						cpu_to_le32(passkey), 0); +						passkey, 0);  	hci_dev_unlock(hcon->hdev);  	return ret;  } -static void confirm_work(struct work_struct *work) +static u8 smp_confirm(struct smp_chan *smp)  { -	struct smp_chan *smp = container_of(work, struct smp_chan, confirm);  	struct l2cap_conn *conn = smp->conn; -	struct crypto_blkcipher *tfm; +	struct hci_dev *hdev = conn->hcon->hdev; +	struct crypto_blkcipher *tfm = hdev->tfm_aes;  	struct smp_cmd_pairing_confirm cp;  	int ret; -	u8 res[16], reason;  	BT_DBG("conn %p", conn); -	tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); -	if (IS_ERR(tfm)) { -		reason = SMP_UNSPECIFIED; -		goto error; -	} +	/* Prevent mutual access to hdev->tfm_aes */ +	hci_dev_lock(hdev); -	smp->tfm = tfm; +	ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, +		     conn->hcon->init_addr_type, &conn->hcon->init_addr, +		     conn->hcon->resp_addr_type, &conn->hcon->resp_addr, +		     cp.confirm_val); -	if (conn->hcon->out) -		ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0, -			     conn->src, conn->hcon->dst_type, conn->dst, res); -	else -		ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, -			     conn->hcon->dst_type, conn->dst, 0, conn->src, -			     res); -	if (ret) { -		reason = SMP_UNSPECIFIED; -		goto error; -	} +	hci_dev_unlock(hdev); -	clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); +	if (ret) +		return SMP_UNSPECIFIED; -	swap128(res, cp.confirm_val); -	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); +	clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags); -	return; +	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); -error: -	smp_failure(conn, reason, 1); +	return 0;  } -static void random_work(struct work_struct *work) +static u8 smp_random(struct smp_chan *smp)  { -	struct smp_chan *smp = container_of(work, struct smp_chan, random);  	struct l2cap_conn *conn = smp->conn;  	struct hci_conn *hcon = conn->hcon; -	struct crypto_blkcipher *tfm = smp->tfm; -	u8 reason, confirm[16], res[16], key[16]; +	struct hci_dev *hdev = hcon->hdev; +	struct crypto_blkcipher *tfm = hdev->tfm_aes; +	u8 confirm[16];  	int ret; -	if (IS_ERR_OR_NULL(tfm)) { -		reason = SMP_UNSPECIFIED; -		goto error; -	} +	if (IS_ERR_OR_NULL(tfm)) +		return SMP_UNSPECIFIED;  	BT_DBG("conn %p %s", conn, conn->hcon->out ? 
"master" : "slave"); -	if (hcon->out) -		ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0, -			     conn->src, hcon->dst_type, conn->dst, res); -	else -		ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, -			     hcon->dst_type, conn->dst, 0, conn->src, res); -	if (ret) { -		reason = SMP_UNSPECIFIED; -		goto error; -	} +	/* Prevent mutual access to hdev->tfm_aes */ +	hci_dev_lock(hdev); + +	ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, +		     hcon->init_addr_type, &hcon->init_addr, +		     hcon->resp_addr_type, &hcon->resp_addr, confirm); -	swap128(res, confirm); +	hci_dev_unlock(hdev); + +	if (ret) +		return SMP_UNSPECIFIED;  	if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {  		BT_ERR("Pairing failed (confirmation values mismatch)"); -		reason = SMP_CONFIRM_FAILED; -		goto error; +		return SMP_CONFIRM_FAILED;  	}  	if (hcon->out) { -		u8 stk[16], rand[8]; -		__le16 ediv; - -		memset(rand, 0, sizeof(rand)); -		ediv = 0; +		u8 stk[16]; +		__le64 rand = 0; +		__le16 ediv = 0; -		smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key); -		swap128(key, stk); +		smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, stk);  		memset(stk + smp->enc_key_size, 0,  		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); -		if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) { -			reason = SMP_UNSPECIFIED; -			goto error; -		} +		if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) +			return SMP_UNSPECIFIED;  		hci_le_start_enc(hcon, ediv, rand, stk);  		hcon->enc_key_size = smp->enc_key_size;  	} else { -		u8 stk[16], r[16], rand[8]; -		__le16 ediv; +		u8 stk[16], auth; +		__le64 rand = 0; +		__le16 ediv = 0; -		memset(rand, 0, sizeof(rand)); -		ediv = 0; +		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), +			     smp->prnd); -		swap128(smp->prnd, r); -		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r); - -		smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key); -		swap128(key, stk); +		smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, stk);  		memset(stk + smp->enc_key_size, 0, -				SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); +		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); + +		if (hcon->pending_sec_level == BT_SECURITY_HIGH) +			auth = 1; +		else +			auth = 0; -		hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type, -			    HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size, +		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, +			    HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,  			    ediv, rand);  	} -	return; - -error: -	smp_failure(conn, reason, 1); +	return 0;  }  static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)  {  	struct smp_chan *smp; -	smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC); +	smp = kzalloc(sizeof(*smp), GFP_ATOMIC);  	if (!smp)  		return NULL; -	INIT_WORK(&smp->confirm, confirm_work); -	INIT_WORK(&smp->random, random_work); -  	smp->conn = conn;  	conn->smp_chan = smp;  	conn->hcon->smp_conn = conn; @@ -513,11 +596,33 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)  void smp_chan_destroy(struct l2cap_conn *conn)  {  	struct smp_chan *smp = conn->smp_chan; +	bool complete;  	BUG_ON(!smp); -	if (smp->tfm) -		crypto_free_blkcipher(smp->tfm); +	complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); +	mgmt_smp_complete(conn->hcon, complete); + +	kfree(smp->csrk); +	kfree(smp->slave_csrk); + +	/* If pairing failed clean up any keys we might have */ +	if (!complete) { +		if (smp->ltk) { +			list_del(&smp->ltk->list); +			kfree(smp->ltk); +		} + +		if (smp->slave_ltk) { +			
list_del(&smp->slave_ltk->list); +			kfree(smp->slave_ltk); +		} + +		if (smp->remote_irk) { +			list_del(&smp->remote_irk->list); +			kfree(smp->remote_irk); +		} +	}  	kfree(smp);  	conn->smp_chan = NULL; @@ -530,7 +635,6 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)  	struct l2cap_conn *conn = hcon->smp_conn;  	struct smp_chan *smp;  	u32 value; -	u8 key[16];  	BT_DBG(""); @@ -542,26 +646,28 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)  	switch (mgmt_op) {  	case MGMT_OP_USER_PASSKEY_REPLY:  		value = le32_to_cpu(passkey); -		memset(key, 0, sizeof(key)); +		memset(smp->tk, 0, sizeof(smp->tk));  		BT_DBG("PassKey: %d", value); -		put_unaligned_le32(value, key); -		swap128(key, smp->tk); +		put_unaligned_le32(value, smp->tk);  		/* Fall Through */  	case MGMT_OP_USER_CONFIRM_REPLY: -		set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); +		set_bit(SMP_FLAG_TK_VALID, &smp->flags);  		break;  	case MGMT_OP_USER_PASSKEY_NEG_REPLY:  	case MGMT_OP_USER_CONFIRM_NEG_REPLY: -		smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1); +		smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED);  		return 0;  	default: -		smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1); +		smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED);  		return -EOPNOTSUPP;  	}  	/* If it is our turn to send Pairing Confirm, do so now */ -	if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags)) -		queue_work(hcon->hdev->workqueue, &smp->confirm); +	if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) { +		u8 rsp = smp_confirm(smp); +		if (rsp) +			smp_failure(conn, rsp); +	}  	return 0;  } @@ -570,12 +676,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)  {  	struct smp_cmd_pairing rsp, *req = (void *) skb->data;  	struct smp_chan *smp; -	u8 key_size; -	u8 auth = SMP_AUTH_NONE; +	u8 key_size, auth, sec_level;  	int ret;  	BT_DBG("conn %p", conn); +	if (skb->len < sizeof(*req)) +		return SMP_INVALID_PARAMS; +  	if (conn->hcon->link_mode & HCI_LM_MASTER)  		return SMP_CMD_NOTSUPP; @@ -592,10 +700,21 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)  	skb_pull(skb, sizeof(*req));  	/* We didn't start the pairing, so match remote */ -	if (req->auth_req & SMP_AUTH_BONDING) -		auth = req->auth_req; +	auth = req->auth_req; -	conn->hcon->pending_sec_level = authreq_to_seclevel(auth); +	sec_level = authreq_to_seclevel(auth); +	if (sec_level > conn->hcon->pending_sec_level) +		conn->hcon->pending_sec_level = sec_level; + +	/* If we need MITM check that it can be acheived */ +	if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { +		u8 method; + +		method = get_auth_method(smp, conn->hcon->io_capability, +					 req->io_capability); +		if (method == JUST_WORKS || method == JUST_CFM) +			return SMP_AUTH_REQUIREMENTS; +	}  	build_pairing_cmd(conn, req, &rsp, auth); @@ -603,9 +722,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)  	if (check_enc_key_size(conn, key_size))  		return SMP_ENC_KEY_SIZE; -	ret = smp_rand(smp->prnd); -	if (ret) -		return SMP_UNSPECIFIED; +	get_random_bytes(smp->prnd, sizeof(smp->prnd));  	smp->prsp[0] = SMP_CMD_PAIRING_RSP;  	memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); @@ -617,6 +734,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)  	if (ret)  		return SMP_UNSPECIFIED; +	clear_bit(SMP_FLAG_INITIATOR, &smp->flags); +  	return 0;  } @@ -624,12 +743,14 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)  {  	struct smp_cmd_pairing *req, *rsp 
= (void *) skb->data;  	struct smp_chan *smp = conn->smp_chan; -	struct hci_dev *hdev = conn->hcon->hdev;  	u8 key_size, auth = SMP_AUTH_NONE;  	int ret;  	BT_DBG("conn %p", conn); +	if (skb->len < sizeof(*rsp)) +		return SMP_INVALID_PARAMS; +  	if (!(conn->hcon->link_mode & HCI_LM_MASTER))  		return SMP_CMD_NOTSUPP; @@ -641,15 +762,28 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)  	if (check_enc_key_size(conn, key_size))  		return SMP_ENC_KEY_SIZE; -	ret = smp_rand(smp->prnd); -	if (ret) -		return SMP_UNSPECIFIED; +	/* If we need MITM check that it can be acheived */ +	if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { +		u8 method; + +		method = get_auth_method(smp, req->io_capability, +					 rsp->io_capability); +		if (method == JUST_WORKS || method == JUST_CFM) +			return SMP_AUTH_REQUIREMENTS; +	} + +	get_random_bytes(smp->prnd, sizeof(smp->prnd));  	smp->prsp[0] = SMP_CMD_PAIRING_RSP;  	memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); +	/* Update remote key distribution in case the remote cleared +	 * some bits that we had enabled in our request. +	 */ +	smp->remote_key_dist &= rsp->resp_key_dist; +  	if ((req->auth_req & SMP_AUTH_BONDING) && -			(rsp->auth_req & SMP_AUTH_BONDING)) +	    (rsp->auth_req & SMP_AUTH_BONDING))  		auth = SMP_AUTH_BONDING;  	auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; @@ -658,13 +792,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)  	if (ret)  		return SMP_UNSPECIFIED; -	set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); +	set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);  	/* Can't compose response until we have been confirmed */ -	if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) -		return 0; - -	queue_work(hdev->workqueue, &smp->confirm); +	if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) +		return smp_confirm(smp);  	return 0;  } @@ -672,24 +804,22 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)  static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)  {  	struct smp_chan *smp = conn->smp_chan; -	struct hci_dev *hdev = conn->hcon->hdev;  	BT_DBG("conn %p %s", conn, conn->hcon->out ? 
"master" : "slave"); +	if (skb->len < sizeof(smp->pcnf)) +		return SMP_INVALID_PARAMS; +  	memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));  	skb_pull(skb, sizeof(smp->pcnf)); -	if (conn->hcon->out) { -		u8 random[16]; - -		swap128(smp->prnd, random); -		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), -								random); -	} else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) { -		queue_work(hdev->workqueue, &smp->confirm); -	} else { -		set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); -	} +	if (conn->hcon->out) +		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), +			     smp->prnd); +	else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) +		return smp_confirm(smp); +	else +		set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);  	return 0;  } @@ -697,16 +827,16 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)  static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)  {  	struct smp_chan *smp = conn->smp_chan; -	struct hci_dev *hdev = conn->hcon->hdev;  	BT_DBG("conn %p", conn); -	swap128(skb->data, smp->rrnd); -	skb_pull(skb, sizeof(smp->rrnd)); +	if (skb->len < sizeof(smp->rrnd)) +		return SMP_INVALID_PARAMS; -	queue_work(hdev->workqueue, &smp->random); +	memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); +	skb_pull(skb, sizeof(smp->rrnd)); -	return 0; +	return smp_random(smp);  }  static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) @@ -714,7 +844,8 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)  	struct smp_ltk *key;  	struct hci_conn *hcon = conn->hcon; -	key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type); +	key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type, +				   hcon->out);  	if (!key)  		return 0; @@ -728,18 +859,27 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)  	hcon->enc_key_size = key->enc_size;  	return 1; -  } +  static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)  {  	struct smp_cmd_security_req *rp = (void *) skb->data;  	struct smp_cmd_pairing cp;  	struct hci_conn *hcon = conn->hcon;  	struct smp_chan *smp; +	u8 sec_level;  	BT_DBG("conn %p", conn); -	hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); +	if (skb->len < sizeof(*rp)) +		return SMP_INVALID_PARAMS; + +	if (!(conn->hcon->link_mode & HCI_LM_MASTER)) +		return SMP_CMD_NOTSUPP; + +	sec_level = authreq_to_seclevel(rp->auth_req); +	if (sec_level > hcon->pending_sec_level) +		hcon->pending_sec_level = sec_level;  	if (smp_ltk_encrypt(conn, hcon->pending_sec_level))  		return 0; @@ -759,29 +899,46 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)  	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); +	clear_bit(SMP_FLAG_INITIATOR, &smp->flags); +  	return 0;  } +bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level) +{ +	if (sec_level == BT_SECURITY_LOW) +		return true; + +	if (hcon->sec_level >= sec_level) +		return true; + +	return false; +} +  int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)  {  	struct l2cap_conn *conn = hcon->l2cap_data; -	struct smp_chan *smp = conn->smp_chan; +	struct smp_chan *smp;  	__u8 authreq;  	BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); -	if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) +	/* This may be NULL if there's an unexpected disconnection */ +	if (!conn)  		return 1; -	if (sec_level == BT_SECURITY_LOW) +	if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))  		return 1; -	if (hcon->sec_level >= sec_level) +	if 
(smp_sufficient_security(hcon, sec_level))  		return 1; +	if (sec_level > hcon->pending_sec_level) +		hcon->pending_sec_level = sec_level; +  	if (hcon->link_mode & HCI_LM_MASTER) -		if (smp_ltk_encrypt(conn, sec_level)) -			goto done; +		if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) +			return 0;  	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))  		return 0; @@ -792,6 +949,13 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)  	authreq = seclevel_to_authreq(sec_level); +	/* Require MITM if IO Capability allows or the security level +	 * requires it. +	 */ +	if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || +	    hcon->pending_sec_level > BT_SECURITY_MEDIUM) +		authreq |= SMP_AUTH_MITM; +  	if (hcon->link_mode & HCI_LM_MASTER) {  		struct smp_cmd_pairing cp; @@ -806,8 +970,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)  		smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);  	} -done: -	hcon->pending_sec_level = sec_level; +	set_bit(SMP_FLAG_INITIATOR, &smp->flags);  	return 0;  } @@ -817,6 +980,15 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)  	struct smp_cmd_encrypt_info *rp = (void *) skb->data;  	struct smp_chan *smp = conn->smp_chan; +	BT_DBG("conn %p", conn); + +	if (skb->len < sizeof(*rp)) +		return SMP_INVALID_PARAMS; + +	/* Ignore this PDU if it wasn't requested */ +	if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) +		return 0; +  	skb_pull(skb, sizeof(*rp));  	memcpy(smp->tk, rp->ltk, sizeof(smp->tk)); @@ -830,16 +1002,138 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)  	struct smp_chan *smp = conn->smp_chan;  	struct hci_dev *hdev = conn->hcon->hdev;  	struct hci_conn *hcon = conn->hcon; +	struct smp_ltk *ltk;  	u8 authenticated; +	BT_DBG("conn %p", conn); + +	if (skb->len < sizeof(*rp)) +		return SMP_INVALID_PARAMS; + +	/* Ignore this PDU if it wasn't requested */ +	if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) +		return 0; + +	/* Mark the information as received */ +	smp->remote_key_dist &= ~SMP_DIST_ENC_KEY; + +	skb_pull(skb, sizeof(*rp)); + +	hci_dev_lock(hdev); +	authenticated = (hcon->sec_level == BT_SECURITY_HIGH); +	ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, +			  authenticated, smp->tk, smp->enc_key_size, +			  rp->ediv, rp->rand); +	smp->ltk = ltk; +	if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) +		smp_distribute_keys(conn); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb) +{ +	struct smp_cmd_ident_info *info = (void *) skb->data; +	struct smp_chan *smp = conn->smp_chan; + +	BT_DBG(""); + +	if (skb->len < sizeof(*info)) +		return SMP_INVALID_PARAMS; + +	/* Ignore this PDU if it wasn't requested */ +	if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) +		return 0; + +	skb_pull(skb, sizeof(*info)); + +	memcpy(smp->irk, info->irk, 16); + +	return 0; +} + +static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, +				   struct sk_buff *skb) +{ +	struct smp_cmd_ident_addr_info *info = (void *) skb->data; +	struct smp_chan *smp = conn->smp_chan; +	struct hci_conn *hcon = conn->hcon; +	bdaddr_t rpa; + +	BT_DBG(""); + +	if (skb->len < sizeof(*info)) +		return SMP_INVALID_PARAMS; + +	/* Ignore this PDU if it wasn't requested */ +	if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) +		return 0; + +	/* Mark the information as received */ +	smp->remote_key_dist &= ~SMP_DIST_ID_KEY; + +	skb_pull(skb, sizeof(*info)); + +	/* Strictly speaking the Core Specification (4.1) 
allows sending +	 * an empty address which would force us to rely on just the IRK +	 * as "identity information". However, since such +	 * implementations are not known of and in order to not over +	 * complicate our implementation, simply pretend that we never +	 * received an IRK for such a device. +	 */ +	if (!bacmp(&info->bdaddr, BDADDR_ANY)) { +		BT_ERR("Ignoring IRK with no identity address"); +		smp_distribute_keys(conn); +		return 0; +	} + +	bacpy(&smp->id_addr, &info->bdaddr); +	smp->id_addr_type = info->addr_type; + +	if (hci_bdaddr_is_rpa(&hcon->dst, hcon->dst_type)) +		bacpy(&rpa, &hcon->dst); +	else +		bacpy(&rpa, BDADDR_ANY); + +	smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr, +				      smp->id_addr_type, smp->irk, &rpa); + +	smp_distribute_keys(conn); + +	return 0; +} + +static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) +{ +	struct smp_cmd_sign_info *rp = (void *) skb->data; +	struct smp_chan *smp = conn->smp_chan; +	struct hci_dev *hdev = conn->hcon->hdev; +	struct smp_csrk *csrk; + +	BT_DBG("conn %p", conn); + +	if (skb->len < sizeof(*rp)) +		return SMP_INVALID_PARAMS; + +	/* Ignore this PDU if it wasn't requested */ +	if (!(smp->remote_key_dist & SMP_DIST_SIGN)) +		return 0; + +	/* Mark the information as received */ +	smp->remote_key_dist &= ~SMP_DIST_SIGN; +  	skb_pull(skb, sizeof(*rp));  	hci_dev_lock(hdev); -	authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH); -	hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type, -		    HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size, -		    rp->ediv, rp->rand); -	smp_distribute_keys(conn, 1); +	csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); +	if (csrk) { +		csrk->master = 0x01; +		memcpy(csrk->val, rp->csrk, sizeof(csrk->val)); +	} +	smp->csrk = csrk; +	if (!(smp->remote_key_dist & SMP_DIST_SIGN)) +		smp_distribute_keys(conn);  	hci_dev_unlock(hdev);  	return 0; @@ -847,16 +1141,27 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)  int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)  { -	__u8 code = skb->data[0]; -	__u8 reason; +	struct hci_conn *hcon = conn->hcon; +	__u8 code, reason;  	int err = 0; -	if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) { +	if (hcon->type != LE_LINK) { +		kfree_skb(skb); +		return 0; +	} + +	if (skb->len < 1) { +		kfree_skb(skb); +		return -EILSEQ; +	} + +	if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {  		err = -ENOTSUPP;  		reason = SMP_PAIRING_NOTSUPP;  		goto done;  	} +	code = skb->data[0];  	skb_pull(skb, sizeof(code));  	/* @@ -878,7 +1183,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)  		break;  	case SMP_CMD_PAIRING_FAIL: -		smp_failure(conn, skb->data[0], 0); +		smp_failure(conn, 0);  		reason = 0;  		err = -EPERM;  		break; @@ -908,10 +1213,15 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)  		break;  	case SMP_CMD_IDENT_INFO: +		reason = smp_cmd_ident_info(conn, skb); +		break; +  	case SMP_CMD_IDENT_ADDR_INFO: +		reason = smp_cmd_ident_addr_info(conn, skb); +		break; +  	case SMP_CMD_SIGN_INFO: -		/* Just ignored */ -		reason = 0; +		reason = smp_cmd_sign_info(conn, skb);  		break;  	default: @@ -924,32 +1234,84 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)  done:  	if (reason) -		smp_failure(conn, reason, 1); +		smp_failure(conn, reason);  	kfree_skb(skb);  	return err;  } -int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) +static void smp_notify_keys(struct l2cap_conn *conn) +{ +	struct 
smp_chan *smp = conn->smp_chan; +	struct hci_conn *hcon = conn->hcon; +	struct hci_dev *hdev = hcon->hdev; +	struct smp_cmd_pairing *req = (void *) &smp->preq[1]; +	struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; +	bool persistent; + +	if (smp->remote_irk) { +		mgmt_new_irk(hdev, smp->remote_irk); +		/* Now that user space can be considered to know the +		 * identity address track the connection based on it +		 * from now on. +		 */ +		bacpy(&hcon->dst, &smp->remote_irk->bdaddr); +		hcon->dst_type = smp->remote_irk->addr_type; +		l2cap_conn_update_id_addr(hcon); +	} + +	/* The LTKs and CSRKs should be persistent only if both sides +	 * had the bonding bit set in their authentication requests. +	 */ +	persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); + +	if (smp->csrk) { +		smp->csrk->bdaddr_type = hcon->dst_type; +		bacpy(&smp->csrk->bdaddr, &hcon->dst); +		mgmt_new_csrk(hdev, smp->csrk, persistent); +	} + +	if (smp->slave_csrk) { +		smp->slave_csrk->bdaddr_type = hcon->dst_type; +		bacpy(&smp->slave_csrk->bdaddr, &hcon->dst); +		mgmt_new_csrk(hdev, smp->slave_csrk, persistent); +	} + +	if (smp->ltk) { +		smp->ltk->bdaddr_type = hcon->dst_type; +		bacpy(&smp->ltk->bdaddr, &hcon->dst); +		mgmt_new_ltk(hdev, smp->ltk, persistent); +	} + +	if (smp->slave_ltk) { +		smp->slave_ltk->bdaddr_type = hcon->dst_type; +		bacpy(&smp->slave_ltk->bdaddr, &hcon->dst); +		mgmt_new_ltk(hdev, smp->slave_ltk, persistent); +	} +} + +int smp_distribute_keys(struct l2cap_conn *conn)  {  	struct smp_cmd_pairing *req, *rsp;  	struct smp_chan *smp = conn->smp_chan; +	struct hci_conn *hcon = conn->hcon; +	struct hci_dev *hdev = hcon->hdev;  	__u8 *keydist; -	BT_DBG("conn %p force %d", conn, force); +	BT_DBG("conn %p", conn); -	if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) +	if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))  		return 0;  	rsp = (void *) &smp->prsp[1];  	/* The responder sends its keys first */ -	if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07)) +	if (hcon->out && (smp->remote_key_dist & 0x07))  		return 0;  	req = (void *) &smp->preq[1]; -	if (conn->hcon->out) { +	if (hcon->out) {  		keydist = &rsp->init_key_dist;  		*keydist &= req->init_key_dist;  	} else { @@ -957,28 +1319,30 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)  		*keydist &= req->resp_key_dist;  	} -  	BT_DBG("keydist 0x%x", *keydist);  	if (*keydist & SMP_DIST_ENC_KEY) {  		struct smp_cmd_encrypt_info enc;  		struct smp_cmd_master_ident ident; -		struct hci_conn *hcon = conn->hcon; +		struct smp_ltk *ltk;  		u8 authenticated;  		__le16 ediv; +		__le64 rand;  		get_random_bytes(enc.ltk, sizeof(enc.ltk));  		get_random_bytes(&ediv, sizeof(ediv)); -		get_random_bytes(ident.rand, sizeof(ident.rand)); +		get_random_bytes(&rand, sizeof(rand));  		smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);  		authenticated = hcon->sec_level == BT_SECURITY_HIGH; -		hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type, -			    HCI_SMP_LTK_SLAVE, 1, authenticated, -			    enc.ltk, smp->enc_key_size, ediv, ident.rand); +		ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, +				  HCI_SMP_LTK_SLAVE, authenticated, enc.ltk, +				  smp->enc_key_size, ediv, rand); +		smp->slave_ltk = ltk;  		ident.ediv = ediv; +		ident.rand = rand;  		smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); @@ -989,37 +1353,54 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)  		struct smp_cmd_ident_addr_info addrinfo;  		struct smp_cmd_ident_info idinfo; -		/* Send a dummy 
key */ -		get_random_bytes(idinfo.irk, sizeof(idinfo.irk)); +		memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk));  		smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); -		/* Just public address */ -		memset(&addrinfo, 0, sizeof(addrinfo)); -		bacpy(&addrinfo.bdaddr, conn->src); +		/* The hci_conn contains the local identity address +		 * after the connection has been established. +		 * +		 * This is true even when the connection has been +		 * established using a resolvable random address. +		 */ +		bacpy(&addrinfo.bdaddr, &hcon->src); +		addrinfo.addr_type = hcon->src_type;  		smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), -								&addrinfo); +			     &addrinfo);  		*keydist &= ~SMP_DIST_ID_KEY;  	}  	if (*keydist & SMP_DIST_SIGN) {  		struct smp_cmd_sign_info sign; +		struct smp_csrk *csrk; -		/* Send a dummy key */ +		/* Generate a new random key */  		get_random_bytes(sign.csrk, sizeof(sign.csrk)); +		csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); +		if (csrk) { +			csrk->master = 0x00; +			memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); +		} +		smp->slave_csrk = csrk; +  		smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);  		*keydist &= ~SMP_DIST_SIGN;  	} -	if (conn->hcon->out || force) { -		clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags); -		cancel_delayed_work_sync(&conn->security_timer); -		smp_chan_destroy(conn); -	} +	/* If there are still keys to be received wait for them */ +	if ((smp->remote_key_dist & 0x07)) +		return 0; + +	clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); +	cancel_delayed_work_sync(&conn->security_timer); +	set_bit(SMP_FLAG_COMPLETE, &smp->flags); +	smp_notify_keys(conn); + +	smp_chan_destroy(conn);  	return 0;  } diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h new file mode 100644 index 00000000000..5a8dc36460a --- /dev/null +++ b/net/bluetooth/smp.h @@ -0,0 +1,132 @@ +/* +   BlueZ - Bluetooth protocol stack for Linux +   Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). + +   This program is free software; you can redistribute it and/or modify +   it under the terms of the GNU General Public License version 2 as +   published by the Free Software Foundation; + +   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. +   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY +   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES +   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, +   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS +   SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __SMP_H +#define __SMP_H + +struct smp_command_hdr { +	__u8	code; +} __packed; + +#define SMP_CMD_PAIRING_REQ	0x01 +#define SMP_CMD_PAIRING_RSP	0x02 +struct smp_cmd_pairing { +	__u8	io_capability; +	__u8	oob_flag; +	__u8	auth_req; +	__u8	max_key_size; +	__u8	init_key_dist; +	__u8	resp_key_dist; +} __packed; + +#define SMP_IO_DISPLAY_ONLY	0x00 +#define SMP_IO_DISPLAY_YESNO	0x01 +#define SMP_IO_KEYBOARD_ONLY	0x02 +#define SMP_IO_NO_INPUT_OUTPUT	0x03 +#define SMP_IO_KEYBOARD_DISPLAY	0x04 + +#define SMP_OOB_NOT_PRESENT	0x00 +#define SMP_OOB_PRESENT		0x01 + +#define SMP_DIST_ENC_KEY	0x01 +#define SMP_DIST_ID_KEY		0x02 +#define SMP_DIST_SIGN		0x04 + +#define SMP_AUTH_NONE		0x00 +#define SMP_AUTH_BONDING	0x01 +#define SMP_AUTH_MITM		0x04 + +#define SMP_CMD_PAIRING_CONFIRM	0x03 +struct smp_cmd_pairing_confirm { +	__u8	confirm_val[16]; +} __packed; + +#define SMP_CMD_PAIRING_RANDOM	0x04 +struct smp_cmd_pairing_random { +	__u8	rand_val[16]; +} __packed; + +#define SMP_CMD_PAIRING_FAIL	0x05 +struct smp_cmd_pairing_fail { +	__u8	reason; +} __packed; + +#define SMP_CMD_ENCRYPT_INFO	0x06 +struct smp_cmd_encrypt_info { +	__u8	ltk[16]; +} __packed; + +#define SMP_CMD_MASTER_IDENT	0x07 +struct smp_cmd_master_ident { +	__le16	ediv; +	__le64	rand; +} __packed; + +#define SMP_CMD_IDENT_INFO	0x08 +struct smp_cmd_ident_info { +	__u8	irk[16]; +} __packed; + +#define SMP_CMD_IDENT_ADDR_INFO	0x09 +struct smp_cmd_ident_addr_info { +	__u8	addr_type; +	bdaddr_t bdaddr; +} __packed; + +#define SMP_CMD_SIGN_INFO	0x0a +struct smp_cmd_sign_info { +	__u8	csrk[16]; +} __packed; + +#define SMP_CMD_SECURITY_REQ	0x0b +struct smp_cmd_security_req { +	__u8	auth_req; +} __packed; + +#define SMP_PASSKEY_ENTRY_FAILED	0x01 +#define SMP_OOB_NOT_AVAIL		0x02 +#define SMP_AUTH_REQUIREMENTS		0x03 +#define SMP_CONFIRM_FAILED		0x04 +#define SMP_PAIRING_NOTSUPP		0x05 +#define SMP_ENC_KEY_SIZE		0x06 +#define SMP_CMD_NOTSUPP			0x07 +#define SMP_UNSPECIFIED			0x08 +#define SMP_REPEATED_ATTEMPTS		0x09 +#define SMP_INVALID_PARAMS		0x0a + +#define SMP_MIN_ENC_KEY_SIZE		7 +#define SMP_MAX_ENC_KEY_SIZE		16 + +/* SMP Commands */ +bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); +int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); +int smp_distribute_keys(struct l2cap_conn *conn); +int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); + +void smp_chan_destroy(struct l2cap_conn *conn); + +bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], +		     bdaddr_t *bdaddr); +int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa); + +#endif /* __SMP_H */  | 
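Illustrative sketch, not part of the patch: the resolvable private address support added above (smp_ah, smp_irk_matches, smp_generate_rpa) reduces to a 24-bit hash computed with the IRK over a 24-bit prand. A bdaddr_t stores the least significant octet first, so the hash occupies b[0..2], the prand b[3..5], and the top two bits of b[5] are forced to 01. The snippet below restates that check outside the kernel; the aes128_fn callback is a hypothetical stand-in for a single-block AES-128 encryption (the kernel drives it through smp_e() and hdev->tfm_aes) and is expected to use the same least-significant-octet-first convention as smp_e() in the patch.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t b[6]; } bdaddr_t;	/* least significant octet first */

/* Hypothetical caller-supplied primitive: out = AES-128_key(in), 16 bytes
 * each, operating on least-significant-octet-first buffers like smp_e(). */
typedef void (*aes128_fn)(const uint8_t key[16], const uint8_t in[16],
			  uint8_t out[16]);

/* ah(k, r) = e(k, r') mod 2^24 with r' = padding || r, as in smp_ah() above. */
static void ah(aes128_fn e, const uint8_t irk[16], const uint8_t r[3],
	       uint8_t out[3])
{
	uint8_t rp[16] = { 0 }, enc[16];

	memcpy(rp, r, 3);	/* r' = padding || r */
	e(irk, rp, enc);
	memcpy(out, enc, 3);	/* keep only the least significant 24 bits */
}

/* Equivalent of smp_irk_matches() plus the RPA bit pattern used by
 * smp_generate_rpa(). */
static bool rpa_matches_irk(aes128_fn e, const uint8_t irk[16],
			    const bdaddr_t *addr)
{
	uint8_t hash[3];

	if ((addr->b[5] & 0xc0) != 0x40)	/* not a resolvable private address */
		return false;

	ah(e, irk, &addr->b[3], hash);		/* prand is stored in b[3..5] */
	return memcmp(addr->b, hash, 3) == 0;	/* hash is stored in b[0..2] */
}

smp_generate_rpa() in the patch is the opposite direction of the same primitive: pick a random 3-byte prand, force b[5] into the 01xxxxxx pattern, then fill b[0..2] with ah(irk, prand).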
